* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/cpudata.h>
--- /dev/null
-#include <linux/module.h>
+/*
+ * BSG helper library
+ *
+ * Copyright (C) 2008 James Smart, Emulex Corporation
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2011 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/bsg-lib.h>
+#include <linux/export.h>
+#include <scsi/scsi_cmnd.h>
+
+/**
+ * bsg_destroy_job - routine to tear down/delete a bsg job
+ * @job: bsg_job that is to be torn down
+ */
+static void bsg_destroy_job(struct bsg_job *job)
+{
+ put_device(job->dev); /* release reference for the request */
+
+ kfree(job->request_payload.sg_list);
+ kfree(job->reply_payload.sg_list);
+ kfree(job);
+}
+
+/**
+ * bsg_job_done - completion routine for bsg requests
+ * @job: bsg_job that is complete
+ * @result: job reply result
+ * @reply_payload_rcv_len: length of payload received
+ *
+ * The LLD should call this when the bsg job has completed.
+ */
+void bsg_job_done(struct bsg_job *job, int result,
+ unsigned int reply_payload_rcv_len)
+{
+ struct request *req = job->req;
+ struct request *rsp = req->next_rq;
+ int err;
+
+ err = job->req->errors = result;
+ if (err < 0)
+ /* we're only returning the result field in the reply */
+ job->req->sense_len = sizeof(u32);
+ else
+ job->req->sense_len = job->reply_len;
+ /* we assume all request payload was transferred, residual == 0 */
+ req->resid_len = 0;
+
+ if (rsp) {
+ WARN_ON(reply_payload_rcv_len > rsp->resid_len);
+
+ /* set reply (bidi) residual */
+ rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
+ }
+ blk_complete_request(req);
+}
+EXPORT_SYMBOL_GPL(bsg_job_done);
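For orientation, a minimal sketch (not part of the patch) of how an LLD's completion path might call bsg_job_done(). The result-word-first reply layout follows the "only returning the result field" convention noted above; everything except bsg_job_done() and the struct bsg_job fields is hypothetical.

static void example_lld_complete(struct bsg_job *job, int result,
                                 unsigned int rcv_len)
{
        /* job->reply points at the sense buffer set up by bsg_create_job() */
        u32 *reply = job->reply;

        reply[0] = result;              /* result word read back by userspace */
        bsg_job_done(job, result, rcv_len);
}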
+
+/**
+ * bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * @rq: BSG request that holds the job to be destroyed
+ */
+static void bsg_softirq_done(struct request *rq)
+{
+ struct bsg_job *job = rq->special;
+
+ blk_end_request_all(rq, rq->errors);
+ bsg_destroy_job(job);
+}
+
+static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
+{
+ size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
+
+ BUG_ON(!req->nr_phys_segments);
+
+ buf->sg_list = kzalloc(sz, GFP_KERNEL);
+ if (!buf->sg_list)
+ return -ENOMEM;
+ sg_init_table(buf->sg_list, req->nr_phys_segments);
+ buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
+ buf->payload_len = blk_rq_bytes(req);
+ return 0;
+}
+
+/**
+ * bsg_create_job - create the bsg_job structure for the bsg request
+ * @dev: device that is being sent the bsg request
+ * @req: BSG request that needs a job structure
+ */
+static int bsg_create_job(struct device *dev, struct request *req)
+{
+ struct request *rsp = req->next_rq;
+ struct request_queue *q = req->q;
+ struct bsg_job *job;
+ int ret;
+
+ BUG_ON(req->special);
+
+ job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ req->special = job;
+ job->req = req;
+ if (q->bsg_job_size)
+ job->dd_data = (void *)&job[1];
+ job->request = req->cmd;
+ job->request_len = req->cmd_len;
+ job->reply = req->sense;
+ job->reply_len = SCSI_SENSE_BUFFERSIZE; /* size of allocated sense buffer */
+ if (req->bio) {
+ ret = bsg_map_buffer(&job->request_payload, req);
+ if (ret)
+ goto failjob_rls_job;
+ }
+ if (rsp && rsp->bio) {
+ ret = bsg_map_buffer(&job->reply_payload, rsp);
+ if (ret)
+ goto failjob_rls_rqst_payload;
+ }
+ job->dev = dev;
+ /* take a reference for the request */
+ get_device(job->dev);
+ return 0;
+
+failjob_rls_rqst_payload:
+ kfree(job->request_payload.sg_list);
+failjob_rls_job:
+ kfree(job);
+ return -ENOMEM;
+}
+
+/**
+ * bsg_goose_queue - restart queue in case it was stopped
+ * @q: request queue to be restarted
+ */
+void bsg_goose_queue(struct request_queue *q)
+{
+ if (!q)
+ return;
+
+ blk_run_queue_async(q);
+}
+EXPORT_SYMBOL_GPL(bsg_goose_queue);
+
+/**
+ * bsg_request_fn - generic handler for bsg requests
+ * @q: request queue to manage
+ *
+ * On error, the bsg_create_job() function should return a -Exyz error
+ * value that will be stored in req->errors.
+ *
+ * Drivers/subsys should pass this to the queue init function.
+ */
+void bsg_request_fn(struct request_queue *q)
+{
+ struct device *dev = q->queuedata;
+ struct request *req;
+ struct bsg_job *job;
+ int ret;
+
+ if (!get_device(dev))
+ return;
+
+ while (1) {
+ req = blk_fetch_request(q);
+ if (!req)
+ break;
+ spin_unlock_irq(q->queue_lock);
+
+ ret = bsg_create_job(dev, req);
+ if (ret) {
+ req->errors = ret;
+ blk_end_request_all(req, ret);
+ spin_lock_irq(q->queue_lock);
+ continue;
+ }
+
+ job = req->special;
+ ret = q->bsg_job_fn(job);
+ spin_lock_irq(q->queue_lock);
+ if (ret)
+ break;
+ }
+
+ spin_unlock_irq(q->queue_lock);
+ put_device(dev);
+ spin_lock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(bsg_request_fn);
+
+/**
+ * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
+ * @dev: device to attach bsg device to
+ * @q: request queue setup by caller
+ * @name: device to give bsg device
+ * @job_fn: bsg job handler
+ * @dd_job_size: size of LLD data needed for each job
+ *
+ * The caller should have set up the request queue with bsg_request_fn
+ * as the request_fn.
+ */
+int bsg_setup_queue(struct device *dev, struct request_queue *q,
+ char *name, bsg_job_fn *job_fn, int dd_job_size)
+{
+ int ret;
+
+ q->queuedata = dev;
+ q->bsg_job_size = dd_job_size;
+ q->bsg_job_fn = job_fn;
+ queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_softirq_done(q, bsg_softirq_done);
+ blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
+
+ ret = bsg_register_queue(q, dev, name, NULL);
+ if (ret) {
+ printk(KERN_ERR "%s: bsg interface failed to "
+ "initialize - register queue\n", dev->kobj.name);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bsg_setup_queue);
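Purely illustrative: the initialisation order the kerneldoc above implies, assuming a hypothetical my_bsg_job() handler. blk_init_queue(), blk_cleanup_queue(), bsg_request_fn and bsg_setup_queue() are the real interfaces; the rest is a sketch.

static int my_bsg_job(struct bsg_job *job);     /* hypothetical LLD handler */

static int example_attach_bsg(struct device *dev)
{
        struct request_queue *q;
        int ret;

        /* the request_fn must be bsg_request_fn, as required above */
        q = blk_init_queue(bsg_request_fn, NULL);
        if (!q)
                return -ENOMEM;

        ret = bsg_setup_queue(dev, q, (char *)dev_name(dev), my_bsg_job, 0);
        if (ret)
                blk_cleanup_queue(q);
        return ret;
}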
+
+/**
+ * bsg_remove_queue - Deletes the bsg dev from the q
+ * @q: the request_queue that is to be torn down.
+ *
+ * Notes:
+ * Before unregistering the queue, drain any requests that are blocked.
+ */
+void bsg_remove_queue(struct request_queue *q)
+{
+ struct request *req; /* block request */
+ int counts; /* totals for request_list count and starved */
+
+ if (!q)
+ return;
+
+ /* Stop taking in new requests */
+ spin_lock_irq(q->queue_lock);
+ blk_stop_queue(q);
+
+ /* drain all requests in the queue */
+ while (1) {
+ /* need the lock to fetch a request;
+ * this may fetch the same request as the previous pass
+ */
+ req = blk_fetch_request(q);
+ /* save requests in use and starved */
+ counts = q->rq.count[0] + q->rq.count[1] +
+ q->rq.starved[0] + q->rq.starved[1];
+ spin_unlock_irq(q->queue_lock);
+ /* any requests still outstanding? */
+ if (counts == 0)
+ break;
+
+ /* This may be the same req as the previous iteration;
+ * always call blk_end_request_all() after a fetch. It is
+ * not okay to leave the request unended, because the
+ * fetch started it.
+ */
+ if (req) {
+ /* return -ENXIO to indicate that this queue is
+ * going away
+ */
+ req->errors = -ENXIO;
+ blk_end_request_all(req, -ENXIO);
+ }
+
+ msleep(200); /* allow bsg to possibly finish */
+ spin_lock_irq(q->queue_lock);
+ }
+ bsg_unregister_queue(q);
+}
+EXPORT_SYMBOL_GPL(bsg_remove_queue);
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/amba/bus.h>
#include <plat/ste_dma40.h>
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include "md.h"
+#include "raid1.h"
#include "raid5.h"
-#include "dm.h"
#include "bitmap.h"
+#include <linux/device-mapper.h>
+
#define DM_MSG_PREFIX "raid"
/*
--- /dev/null
-#include <linux/module.h>
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-btree.h"
+#include "dm-btree-internal.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+
+/*
+ * Removing an entry from a btree
+ * ==============================
+ *
+ * A very important constraint for our btree is that no node, except the
+ * root, may have fewer than a certain number of entries.
+ * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
+ *
+ * Ensuring this is complicated by the way we want to only ever hold the
+ * locks on 2 nodes concurrently, and only change nodes in a top to bottom
+ * fashion.
+ *
+ * Each node may have a left or right sibling. When descending the spine,
+ * if a node contains only MIN_ENTRIES then we try to increase this to at
+ * least MIN_ENTRIES + 1. We do this in the following ways:
+ *
+ * [A] No siblings => this can only happen if the node is the root, in which
+ * case we copy the child's contents over the root.
+ *
+ * [B] No left sibling
+ * ==> rebalance(node, right sibling)
+ *
+ * [C] No right sibling
+ * ==> rebalance(left sibling, node)
+ *
+ * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
+ * ==> delete the node, adding its contents to left and right
+ *
+ * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
+ * ==> rebalance(left, node, right)
+ *
+ * After these operations it's possible that our original node no
+ * longer contains the desired sub tree. For this reason this rebalancing
+ * is performed on the children of the current node. This also avoids
+ * having a special case for the root.
+ *
+ * Once this rebalancing has occurred we can then step into the child node
+ * (for internal nodes), or delete the entry (for leaf nodes).
+ */
+
+/*
+ * Some little utilities for moving node data around.
+ */
+static void node_shift(struct node *n, int shift)
+{
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+
+ if (shift < 0) {
+ shift = -shift;
+ memmove(key_ptr(n, 0),
+ key_ptr(n, shift),
+ (nr_entries - shift) * sizeof(__le64));
+ memmove(value_ptr(n, 0, sizeof(__le64)),
+ value_ptr(n, shift, sizeof(__le64)),
+ (nr_entries - shift) * sizeof(__le64));
+ } else {
+ memmove(key_ptr(n, shift),
+ key_ptr(n, 0),
+ nr_entries * sizeof(__le64));
+ memmove(value_ptr(n, shift, sizeof(__le64)),
+ value_ptr(n, 0, sizeof(__le64)),
+ nr_entries * sizeof(__le64));
+ }
+}
+
+static void node_copy(struct node *left, struct node *right, int shift)
+{
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+
+ if (shift < 0) {
+ shift = -shift;
+ memcpy(key_ptr(left, nr_left),
+ key_ptr(right, 0),
+ shift * sizeof(__le64));
+ memcpy(value_ptr(left, nr_left, sizeof(__le64)),
+ value_ptr(right, 0, sizeof(__le64)),
+ shift * sizeof(__le64));
+ } else {
+ memcpy(key_ptr(right, 0),
+ key_ptr(left, nr_left - shift),
+ shift * sizeof(__le64));
+ memcpy(value_ptr(right, 0, sizeof(__le64)),
+ value_ptr(left, nr_left - shift, sizeof(__le64)),
+ shift * sizeof(__le64));
+ }
+}
+
+/*
+ * Delete a specific entry from a leaf node.
+ */
+static void delete_at(struct node *n, unsigned index, size_t value_size)
+{
+ unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
+ unsigned nr_to_copy = nr_entries - (index + 1);
+
+ if (nr_to_copy) {
+ memmove(key_ptr(n, index),
+ key_ptr(n, index + 1),
+ nr_to_copy * sizeof(__le64));
+
+ memmove(value_ptr(n, index, value_size),
+ value_ptr(n, index + 1, value_size),
+ nr_to_copy * value_size);
+ }
+
+ n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+}
+
+static unsigned del_threshold(struct node *n)
+{
+ return le32_to_cpu(n->header.max_entries) / 3;
+}
+
+static unsigned merge_threshold(struct node *n)
+{
+ /*
+ * The extra one is because we know we're potentially going to
+ * delete an entry.
+ */
+ return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
+}
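To make the two thresholds concrete, a worked example assuming max_entries = 252 (what calc_max_entries() in dm-btree.c yields for 4KiB blocks and 8-byte values, taking the node header as 32 bytes):

        del_threshold   = 252 / 3           = 84
        merge_threshold = 2 * (252 / 3) + 1 = 169

So a child is only rebalanced once it has sunk to 84 entries or fewer, and two siblings are merged only when their combined count is at most 169, the extra entry of headroom covering the delete that motivated the rebalance.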
+
+struct child {
+ unsigned index;
+ struct dm_block *block;
+ struct node *n;
+};
+
+static struct dm_btree_value_type le64_type = {
+ .context = NULL,
+ .size = sizeof(__le64),
+ .inc = NULL,
+ .dec = NULL,
+ .equal = NULL
+};
+
+static int init_child(struct dm_btree_info *info, struct node *parent,
+ unsigned index, struct child *result)
+{
+ int r, inc;
+ dm_block_t root;
+
+ result->index = index;
+ root = value64(parent, index);
+
+ r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
+ &result->block, &inc);
+ if (r)
+ return r;
+
+ result->n = dm_block_data(result->block);
+
+ if (inc)
+ inc_children(info->tm, result->n, &le64_type);
+
+ return 0;
+}
+
+static int exit_child(struct dm_btree_info *info, struct child *c)
+{
+ return dm_tm_unlock(info->tm, c->block);
+}
+
+static void shift(struct node *left, struct node *right, int count)
+{
+ if (!count)
+ return;
+
+ if (count > 0) {
+ node_shift(right, count);
+ node_copy(left, right, count);
+ } else {
+ node_copy(left, right, count);
+ node_shift(right, count);
+ }
+
+ left->header.nr_entries =
+ cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
+
+ right->header.nr_entries =
+ cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
+}
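The sign conventions in node_shift()/node_copy()/shift() are easy to misread, so a worked example (this restates the code, nothing new): with left = [a b c d] and right = [e f], shift(left, right, 2) first calls node_shift(right, 2) to open a two-entry gap at the front of right, then node_copy(left, right, 2) to move c and d into that gap, leaving left = [a b] and right = [c d e f]; the nr_entries fields are then adjusted by -2 and +2. A negative count moves entries from right to left via the mirror-image sequence.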
+
+static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *r)
+{
+ struct node *left = l->n;
+ struct node *right = r->n;
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+
+ if (nr_left + nr_right <= merge_threshold(left)) {
+ /*
+ * Merge
+ */
+ node_copy(left, right, -nr_right);
+ left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
+
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ delete_at(parent, r->index, sizeof(__le64));
+
+ /*
+ * We need to decrement the right block, but not its
+ * children, since they're still referenced by left.
+ */
+ dm_tm_dec(info->tm, dm_block_location(r->block));
+ } else {
+ /*
+ * Rebalance.
+ */
+ unsigned target_left = (nr_left + nr_right) / 2;
+
+ shift(left, right, nr_left - target_left);
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(r->block));
+ *key_ptr(parent, r->index) = right->keys[0];
+ }
+}
+
+static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+{
+ int r;
+ struct node *parent;
+ struct child left, right;
+
+ parent = dm_block_data(shadow_current(s));
+
+ r = init_child(info, parent, left_index, &left);
+ if (r)
+ return r;
+
+ r = init_child(info, parent, left_index + 1, &right);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+ __rebalance2(info, parent, &left, &right);
+
+ r = exit_child(info, &left);
+ if (r) {
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &right);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *c, struct child *r)
+{
+ struct node *left = l->n;
+ struct node *center = c->n;
+ struct node *right = r->n;
+
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+
+ unsigned target;
+
+ if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
+ /*
+ * Delete center node:
+ *
+ * We dump as many entries from center as possible into
+ * left, then the rest in right, then rebalance2. This
+ * wastes some cpu, but I want something simple atm.
+ */
+ unsigned shift = min(max_entries - nr_left, nr_center);
+
+ node_copy(left, center, -shift);
+ left->header.nr_entries = cpu_to_le32(nr_left + shift);
+
+ if (shift != nr_center) {
+ shift = nr_center - shift;
+ node_shift(right, shift);
+ node_copy(center, right, shift);
+ right->header.nr_entries = cpu_to_le32(nr_right + shift);
+ }
+
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(r->block));
+ *key_ptr(parent, r->index) = right->keys[0];
+
+ delete_at(parent, c->index, sizeof(__le64));
+ r->index--;
+
+ dm_tm_dec(info->tm, dm_block_location(c->block));
+ __rebalance2(info, parent, l, r);
+
+ return;
+ }
+
+ /*
+ * Rebalance
+ */
+ target = (nr_left + nr_center + nr_right) / 3;
+ BUG_ON(target == nr_center);
+
+ /*
+ * Adjust the left node
+ */
+ shift(left, center, nr_left - target);
+
+ /*
+ * Adjust the right node
+ */
+ shift(center, right, target - nr_right);
+
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ *((__le64 *) value_ptr(parent, c->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(c->block));
+ *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(r->block));
+
+ *key_ptr(parent, c->index) = center->keys[0];
+ *key_ptr(parent, r->index) = right->keys[0];
+}
+
+static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+{
+ int r;
+ struct node *parent = dm_block_data(shadow_current(s));
+ struct child left, center, right;
+
+ /*
+ * FIXME: fill out an array?
+ */
+ r = init_child(info, parent, left_index, &left);
+ if (r)
+ return r;
+
+ r = init_child(info, parent, left_index + 1, &center);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+ r = init_child(info, parent, left_index + 2, &right);
+ if (r) {
+ exit_child(info, &left);
+ exit_child(info, &center);
+ return r;
+ }
+
+ __rebalance3(info, parent, &left, &center, &right);
+
+ r = exit_child(info, &left);
+ if (r) {
+ exit_child(info, &center);
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &center);
+ if (r) {
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &right);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int get_nr_entries(struct dm_transaction_manager *tm,
+ dm_block_t b, uint32_t *result)
+{
+ int r;
+ struct dm_block *block;
+ struct node *n;
+
+ r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
+ if (r)
+ return r;
+
+ n = dm_block_data(block);
+ *result = le32_to_cpu(n->header.nr_entries);
+
+ return dm_tm_unlock(tm, block);
+}
+
+static int rebalance_children(struct shadow_spine *s,
+ struct dm_btree_info *info, uint64_t key)
+{
+ int i, r, has_left_sibling, has_right_sibling;
+ uint32_t child_entries;
+ struct node *n;
+
+ n = dm_block_data(shadow_current(s));
+
+ if (le32_to_cpu(n->header.nr_entries) == 1) {
+ struct dm_block *child;
+ dm_block_t b = value64(n, 0);
+
+ r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
+ if (r)
+ return r;
+
+ memcpy(n, dm_block_data(child),
+ dm_bm_block_size(dm_tm_get_bm(info->tm)));
+ r = dm_tm_unlock(info->tm, child);
+ dm_tm_dec(info->tm, dm_block_location(child));
+
+ return r;
+ }
+
+ i = lower_bound(n, key);
+ if (i < 0)
+ return -ENODATA;
+
+ r = get_nr_entries(info->tm, value64(n, i), &child_entries);
+ if (r)
+ return r;
+
+ if (child_entries > del_threshold(n))
+ return 0;
+
+ has_left_sibling = i > 0 ? 1 : 0;
+ has_right_sibling =
+ (i >= (le32_to_cpu(n->header.nr_entries) - 1)) ? 0 : 1;
+
+ if (!has_left_sibling)
+ r = rebalance2(s, info, i);
+
+ else if (!has_right_sibling)
+ r = rebalance2(s, info, i - 1);
+
+ else
+ r = rebalance3(s, info, i - 1);
+
+ return r;
+}
+
+static int do_leaf(struct node *n, uint64_t key, unsigned *index)
+{
+ int i = lower_bound(n, key);
+
+ if ((i < 0) ||
+ (i >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[i]) != key))
+ return -ENODATA;
+
+ *index = i;
+
+ return 0;
+}
+
+/*
+ * Prepares for removal from one level of the hierarchy. The caller must
+ * actually call delete_at() to remove the entry at index.
+ */
+static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ struct dm_btree_value_type *vt, dm_block_t root,
+ uint64_t key, unsigned *index)
+{
+ int i = *index, inc, r;
+ struct node *n;
+
+ for (;;) {
+ r = shadow_step(s, root, vt, &inc);
+ if (r < 0)
+ break;
+
+ /*
+ * We have to patch up the parent node; ugly, but I don't
+ * see a way to do this automatically as part of the spine
+ * op.
+ */
+ if (shadow_has_parent(s)) {
+ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+ memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
+ &location, sizeof(__le64));
+ }
+
+ n = dm_block_data(shadow_current(s));
+ if (inc)
+ inc_children(info->tm, n, vt);
+
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+ r = rebalance_children(s, info, key);
+ if (r)
+ break;
+
+ n = dm_block_data(shadow_current(s));
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+ i = lower_bound(n, key);
+
+ /*
+ * We know the key is present, or else
+ * rebalance_children would have returned
+ * -ENODATA
+ */
+ root = value64(n, i);
+ }
+
+ return r;
+}
+
+int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, dm_block_t *new_root)
+{
+ unsigned level, last_level = info->levels - 1;
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+ struct node *n;
+
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = remove_raw(&spine, info,
+ (level == last_level ?
+ &info->value_type : &le64_type),
+ root, keys[level], (unsigned *)&index);
+ if (r < 0)
+ break;
+
+ n = dm_block_data(shadow_current(&spine));
+ if (level != last_level) {
+ root = value64(n, index);
+ continue;
+ }
+
+ BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));
+
+ if (info->value_type.dec)
+ info->value_type.dec(info->value_type.context,
+ value_ptr(n, index, info->value_type.size));
+
+ delete_at(n, index, info->value_type.size);
+
+ r = 0;
+ *new_root = shadow_root(&spine);
+ }
+
+ exit_shadow_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_remove);
--- /dev/null
-#include <linux/module.h>
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-btree-internal.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "btree"
+
+/*----------------------------------------------------------------
+ * Array manipulation
+ *--------------------------------------------------------------*/
+static void memcpy_disk(void *dest, const void *src, size_t len)
+ __dm_written_to_disk(src)
+{
+ memcpy(dest, src, len);
+ __dm_unbless_for_disk(src);
+}
+
+static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
+ unsigned index, void *elt)
+ __dm_written_to_disk(elt)
+{
+ if (index < nr_elts)
+ memmove(base + (elt_size * (index + 1)),
+ base + (elt_size * index),
+ (nr_elts - index) * elt_size);
+
+ memcpy_disk(base + (elt_size * index), elt, elt_size);
+}
+
+/*----------------------------------------------------------------*/
+
+/* makes the assumption that no two keys are the same. */
+static int bsearch(struct node *n, uint64_t key, int want_hi)
+{
+ int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
+
+ while (hi - lo > 1) {
+ int mid = lo + ((hi - lo) / 2);
+ uint64_t mid_key = le64_to_cpu(n->keys[mid]);
+
+ if (mid_key == key)
+ return mid;
+
+ if (mid_key < key)
+ lo = mid;
+ else
+ hi = mid;
+ }
+
+ return want_hi ? hi : lo;
+}
+
+int lower_bound(struct node *n, uint64_t key)
+{
+ return bsearch(n, key, 0);
+}
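A quick illustration of the convention, assuming a node n whose keys are [10, 20, 30]: lower_bound() returns the index of the greatest key less than or equal to the search key, or -1 when every key is greater.

        lower_bound(n, 20) ==  1
        lower_bound(n, 25) ==  1        /* 20 is the nearest key below 25 */
        lower_bound(n,  5) == -1        /* no such key; callers map this to -ENODATA */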
+
+void inc_children(struct dm_transaction_manager *tm, struct node *n,
+ struct dm_btree_value_type *vt)
+{
+ unsigned i;
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+
+ if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
+ for (i = 0; i < nr_entries; i++)
+ dm_tm_inc(tm, value64(n, i));
+ else if (vt->inc)
+ for (i = 0; i < nr_entries; i++)
+ vt->inc(vt->context,
+ value_ptr(n, i, vt->size));
+}
+
+static int insert_at(size_t value_size, struct node *node, unsigned index,
+ uint64_t key, void *value)
+ __dm_written_to_disk(value)
+{
+ uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
+ __le64 key_le = cpu_to_le64(key);
+
+ if (index > nr_entries ||
+ index >= le32_to_cpu(node->header.max_entries)) {
+ DMERR("too many entries in btree node for insert");
+ __dm_unbless_for_disk(value);
+ return -ENOMEM;
+ }
+
+ __dm_bless_for_disk(&key_le);
+
+ array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
+ array_insert(value_base(node), value_size, nr_entries, index, value);
+ node->header.nr_entries = cpu_to_le32(nr_entries + 1);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We want 3n entries (for some n). This works more nicely for repeated
+ * insert/remove loops than (2n + 1).
+ */
+static uint32_t calc_max_entries(size_t value_size, size_t block_size)
+{
+ uint32_t total, n;
+ size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */
+
+ block_size -= sizeof(struct node_header);
+ total = block_size / elt_size;
+ n = total / 3; /* rounds down */
+
+ return 3 * n;
+}
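A worked example, assuming 4096-byte metadata blocks, 8-byte values and a 32-byte struct node_header (the header size is an assumption about the on-disk layout, not stated here):

        elt_size   = 8 + 8       = 16
        block_size = 4096 - 32   = 4064
        total      = 4064 / 16   = 254
        n          = 254 / 3     = 84
        result     = 3 * 84      = 252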
+
+int dm_btree_create(struct dm_btree_info *info, dm_block_t *root)
+{
+ int r;
+ struct dm_block *b;
+ struct node *n;
+ size_t block_size;
+ uint32_t max_entries;
+
+ r = new_block(info, &b);
+ if (r < 0)
+ return r;
+
+ block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
+ max_entries = calc_max_entries(info->value_type.size, block_size);
+
+ n = dm_block_data(b);
+ memset(n, 0, block_size);
+ n->header.flags = cpu_to_le32(LEAF_NODE);
+ n->header.nr_entries = cpu_to_le32(0);
+ n->header.max_entries = cpu_to_le32(max_entries);
+ n->header.value_size = cpu_to_le32(info->value_type.size);
+
+ *root = dm_block_location(b);
+
+ return unlock_block(info, b);
+}
+EXPORT_SYMBOL_GPL(dm_btree_create);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Deletion follows a recursive algorithm, but since we have limited stack
+ * space we explicitly manage our own stack on the heap.
+ */
+#define MAX_SPINE_DEPTH 64
+struct frame {
+ struct dm_block *b;
+ struct node *n;
+ unsigned level;
+ unsigned nr_children;
+ unsigned current_child;
+};
+
+struct del_stack {
+ struct dm_transaction_manager *tm;
+ int top;
+ struct frame spine[MAX_SPINE_DEPTH];
+};
+
+static int top_frame(struct del_stack *s, struct frame **f)
+{
+ if (s->top < 0) {
+ DMERR("btree deletion stack empty");
+ return -EINVAL;
+ }
+
+ *f = s->spine + s->top;
+
+ return 0;
+}
+
+static int unprocessed_frames(struct del_stack *s)
+{
+ return s->top >= 0;
+}
+
+static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
+{
+ int r;
+ uint32_t ref_count;
+
+ if (s->top >= MAX_SPINE_DEPTH - 1) {
+ DMERR("btree deletion stack out of memory");
+ return -ENOMEM;
+ }
+
+ r = dm_tm_ref(s->tm, b, &ref_count);
+ if (r)
+ return r;
+
+ if (ref_count > 1)
+ /*
+ * This is a shared node, so we can just decrement its
+ * reference counter and leave the children.
+ */
+ dm_tm_dec(s->tm, b);
+
+ else {
+ struct frame *f = s->spine + ++s->top;
+
+ r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
+ if (r) {
+ s->top--;
+ return r;
+ }
+
+ f->n = dm_block_data(f->b);
+ f->level = level;
+ f->nr_children = le32_to_cpu(f->n->header.nr_entries);
+ f->current_child = 0;
+ }
+
+ return 0;
+}
+
+static void pop_frame(struct del_stack *s)
+{
+ struct frame *f = s->spine + s->top--;
+
+ dm_tm_dec(s->tm, dm_block_location(f->b));
+ dm_tm_unlock(s->tm, f->b);
+}
+
+int dm_btree_destroy(struct dm_btree_info *info, dm_block_t root)
+{
+ int r;
+ struct del_stack *s;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->tm = info->tm;
+ s->top = -1;
+
+ r = push_frame(s, root, 1);
+ if (r)
+ goto out;
+
+ while (unprocessed_frames(s)) {
+ uint32_t flags;
+ struct frame *f;
+ dm_block_t b;
+
+ r = top_frame(s, &f);
+ if (r)
+ goto out;
+
+ if (f->current_child >= f->nr_children) {
+ pop_frame(s);
+ continue;
+ }
+
+ flags = le32_to_cpu(f->n->header.flags);
+ if (flags & INTERNAL_NODE) {
+ b = value64(f->n, f->current_child);
+ f->current_child++;
+ r = push_frame(s, b, f->level);
+ if (r)
+ goto out;
+
+ } else if (f->level != (info->levels - 1)) {
+ b = value64(f->n, f->current_child);
+ f->current_child++;
+ r = push_frame(s, b, f->level + 1);
+ if (r)
+ goto out;
+
+ } else {
+ if (info->value_type.dec) {
+ unsigned i;
+
+ for (i = 0; i < f->nr_children; i++)
+ info->value_type.dec(info->value_type.context,
+ value_ptr(f->n, i, info->value_type.size));
+ }
+ f->current_child = f->nr_children;
+ }
+ }
+
+out:
+ kfree(s);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_destroy);
+
+int dm_btree_delete_gt(struct dm_btree_info *info, dm_block_t root, uint64_t *key,
+ dm_block_t *new_root)
+{
+ /* FIXME: implement */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_btree_delete_gt);
+
+/*----------------------------------------------------------------*/
+
+static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+ int (*search_fn)(struct node *, uint64_t),
+ uint64_t *result_key, void *v, size_t value_size)
+{
+ int i, r;
+ uint32_t flags, nr_entries;
+
+ do {
+ r = ro_step(s, block);
+ if (r < 0)
+ return r;
+
+ i = search_fn(ro_node(s), key);
+
+ flags = le32_to_cpu(ro_node(s)->header.flags);
+ nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
+ if (i < 0 || i >= nr_entries)
+ return -ENODATA;
+
+ if (flags & INTERNAL_NODE)
+ block = value64(ro_node(s), i);
+
+ } while (!(flags & LEAF_NODE));
+
+ *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ memcpy(v, value_ptr(ro_node(s), i, value_size), value_size);
+
+ return 0;
+}
+
+int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value_le)
+{
+ unsigned level, last_level = info->levels - 1;
+ int r = -ENODATA;
+ uint64_t rkey;
+ __le64 internal_value_le;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ size_t size;
+ void *value_p;
+
+ if (level == last_level) {
+ value_p = value_le;
+ size = info->value_type.size;
+
+ } else {
+ value_p = &internal_value_le;
+ size = sizeof(uint64_t);
+ }
+
+ r = btree_lookup_raw(&spine, root, keys[level],
+ lower_bound, &rkey,
+ value_p, size);
+
+ if (!r) {
+ if (rkey != keys[level]) {
+ exit_ro_spine(&spine);
+ return -ENODATA;
+ }
+ } else {
+ exit_ro_spine(&spine);
+ return r;
+ }
+
+ root = le64_to_cpu(internal_value_le);
+ }
+ exit_ro_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_lookup);
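A sketch of the caller's view of the multi-level interface, assuming a hypothetical two-level tree (say device id, then block number) whose leaves hold little-endian 64-bit values; only dm_btree_lookup() itself is a real interface here.

static int example_lookup(struct dm_btree_info *info, dm_block_t root,
                          uint64_t dev, uint64_t block, uint64_t *result)
{
        uint64_t keys[2] = { dev, block };      /* one key per level */
        __le64 value_le;
        int r;

        r = dm_btree_lookup(info, root, keys, &value_le);
        if (r)
                return r;       /* -ENODATA if either level misses */

        *result = le64_to_cpu(value_le);
        return 0;
}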
+
+/*
+ * Splits a node by creating a sibling node and shifting half the node's
+ * contents across. Assumes there is a parent node, and that it has room for
+ * another child.
+ *
+ * Before:
+ * +--------+
+ * | Parent |
+ * +--------+
+ * |
+ * v
+ * +----------+
+ * | A ++++++ |
+ * +----------+
+ *
+ *
+ * After:
+ * +--------+
+ * | Parent |
+ * +--------+
+ * | |
+ * v +------+
+ * +---------+ |
+ * | A* +++ | v
+ * +---------+ +-------+
+ * | B +++ |
+ * +-------+
+ *
+ * Where A* is a shadow of A.
+ */
+static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
+ unsigned parent_index, uint64_t key)
+{
+ int r;
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *parent;
+ struct node *ln, *rn, *pn;
+ __le64 location;
+
+ left = shadow_current(s);
+
+ r = new_block(s->info, &right);
+ if (r < 0)
+ return r;
+
+ ln = dm_block_data(left);
+ rn = dm_block_data(right);
+
+ nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
+ nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;
+
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+
+ rn->header.flags = ln->header.flags;
+ rn->header.nr_entries = cpu_to_le32(nr_right);
+ rn->header.max_entries = ln->header.max_entries;
+ rn->header.value_size = ln->header.value_size;
+ memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));
+
+ size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
+ sizeof(uint64_t) : s->info->value_type.size;
+ memcpy(value_ptr(rn, 0, size), value_ptr(ln, nr_left, size),
+ size * nr_right);
+
+ /*
+ * Patch up the parent
+ */
+ parent = shadow_parent(s);
+
+ pn = dm_block_data(parent);
+ location = cpu_to_le64(dm_block_location(left));
+ __dm_bless_for_disk(&location);
+ memcpy_disk(value_ptr(pn, parent_index, sizeof(__le64)),
+ &location, sizeof(__le64));
+
+ location = cpu_to_le64(dm_block_location(right));
+ __dm_bless_for_disk(&location);
+
+ r = insert_at(sizeof(__le64), pn, parent_index + 1,
+ le64_to_cpu(rn->keys[0]), &location);
+ if (r)
+ return r;
+
+ if (key < le64_to_cpu(rn->keys[0])) {
+ unlock_block(s->info, right);
+ s->nodes[1] = left;
+ } else {
+ unlock_block(s->info, left);
+ s->nodes[1] = right;
+ }
+
+ return 0;
+}
+
+/*
+ * Splits a node by creating two new children beneath the given node.
+ *
+ * Before:
+ * +----------+
+ * | A ++++++ |
+ * +----------+
+ *
+ *
+ * After:
+ * +------------+
+ * | A (shadow) |
+ * +------------+
+ * | |
+ * +------+ +----+
+ * | |
+ * v v
+ * +-------+ +-------+
+ * | B +++ | | C +++ |
+ * +-------+ +-------+
+ */
+static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+{
+ int r;
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *new_parent;
+ struct node *pn, *ln, *rn;
+ __le64 val;
+
+ new_parent = shadow_current(s);
+
+ r = new_block(s->info, &left);
+ if (r < 0)
+ return r;
+
+ r = new_block(s->info, &right);
+ if (r < 0) {
+ /* FIXME: put left */
+ return r;
+ }
+
+ pn = dm_block_data(new_parent);
+ ln = dm_block_data(left);
+ rn = dm_block_data(right);
+
+ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+ nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
+
+ ln->header.flags = pn->header.flags;
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+ ln->header.max_entries = pn->header.max_entries;
+ ln->header.value_size = pn->header.value_size;
+
+ rn->header.flags = pn->header.flags;
+ rn->header.nr_entries = cpu_to_le32(nr_right);
+ rn->header.max_entries = pn->header.max_entries;
+ rn->header.value_size = pn->header.value_size;
+
+ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+ memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
+
+ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+ sizeof(__le64) : s->info->value_type.size;
+ memcpy(value_ptr(ln, 0, size), value_ptr(pn, 0, size), nr_left * size);
+ memcpy(value_ptr(rn, 0, size), value_ptr(pn, nr_left, size),
+ nr_right * size);
+
+ /* new_parent should just point to l and r now */
+ pn->header.flags = cpu_to_le32(INTERNAL_NODE);
+ pn->header.nr_entries = cpu_to_le32(2);
+ pn->header.max_entries = cpu_to_le32(
+ calc_max_entries(sizeof(__le64),
+ dm_bm_block_size(
+ dm_tm_get_bm(s->info->tm))));
+ pn->header.value_size = cpu_to_le32(sizeof(__le64));
+
+ val = cpu_to_le64(dm_block_location(left));
+ __dm_bless_for_disk(&val);
+ pn->keys[0] = ln->keys[0];
+ memcpy_disk(value_ptr(pn, 0, sizeof(__le64)), &val, sizeof(__le64));
+
+ val = cpu_to_le64(dm_block_location(right));
+ __dm_bless_for_disk(&val);
+ pn->keys[1] = rn->keys[0];
+ memcpy_disk(value_ptr(pn, 1, sizeof(__le64)), &val, sizeof(__le64));
+
+ /*
+ * rejig the spine. This is ugly, since it knows too
+ * much about the spine.
+ */
+ if (s->nodes[0] != new_parent) {
+ unlock_block(s->info, s->nodes[0]);
+ s->nodes[0] = new_parent;
+ }
+ if (key < le64_to_cpu(rn->keys[0])) {
+ unlock_block(s->info, right);
+ s->nodes[1] = left;
+ } else {
+ unlock_block(s->info, left);
+ s->nodes[1] = right;
+ }
+ s->count = 2;
+
+ return 0;
+}
+
+static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+ struct dm_btree_value_type *vt,
+ uint64_t key, unsigned *index)
+{
+ int r, i = *index, inc, top = 1;
+ struct node *node;
+
+ for (;;) {
+ r = shadow_step(s, root, vt, &inc);
+ if (r < 0)
+ return r;
+
+ node = dm_block_data(shadow_current(s));
+ if (inc)
+ inc_children(s->info->tm, node, vt);
+
+ /*
+ * We have to patch up the parent node; ugly, but I don't
+ * see a way to do this automatically as part of the spine
+ * op.
+ */
+ if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unness. */
+ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
+ __dm_bless_for_disk(&location);
+ memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
+ &location, sizeof(__le64));
+ }
+
+ node = dm_block_data(shadow_current(s));
+
+ if (node->header.nr_entries == node->header.max_entries) {
+ if (top)
+ r = btree_split_beneath(s, key);
+ else
+ r = btree_split_sibling(s, root, i, key);
+
+ if (r < 0)
+ return r;
+ }
+
+ node = dm_block_data(shadow_current(s));
+
+ i = lower_bound(node, key);
+
+ if (le32_to_cpu(node->header.flags) & LEAF_NODE)
+ break;
+
+ if (i < 0) {
+ /* change the bounds on the lowest key */
+ node->keys[0] = cpu_to_le64(key);
+ i = 0;
+ }
+
+ root = value64(node, i);
+ top = 0;
+ }
+
+ if (i < 0 || le64_to_cpu(node->keys[i]) != key)
+ i++;
+
+ /* we're about to overwrite this value, so undo the increment for it */
+ /* FIXME: shame that inc information is leaking outside the spine.
+ * Plus inc is just plain wrong in the event of a split */
+ if (le64_to_cpu(node->keys[i]) == key && inc)
+ if (vt->dec)
+ vt->dec(vt->context, value_ptr(node, i, vt->size));
+
+ *index = i;
+ return 0;
+}
+
+static int insert(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root,
+ int *inserted)
+ __dm_written_to_disk(value)
+{
+ int r, need_insert;
+ unsigned level, index = -1, last_level = info->levels - 1;
+ dm_block_t block = root;
+ struct shadow_spine spine;
+ struct node *n;
+ struct dm_btree_value_type le64_type;
+
+ le64_type.context = NULL;
+ le64_type.size = sizeof(__le64);
+ le64_type.inc = NULL;
+ le64_type.dec = NULL;
+ le64_type.equal = NULL;
+
+ init_shadow_spine(&spine, info);
+
+ for (level = 0; level < (info->levels - 1); level++) {
+ r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
+ if (r < 0)
+ goto bad;
+
+ n = dm_block_data(shadow_current(&spine));
+ need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[index]) != keys[level]));
+
+ if (need_insert) {
+ dm_block_t new_tree;
+ __le64 new_le;
+
+ r = dm_btree_create(info, &new_tree);
+ if (r < 0)
+ goto bad;
+
+ new_le = cpu_to_le64(new_tree);
+ __dm_bless_for_disk(&new_le);
+
+ r = insert_at(sizeof(uint64_t), n, index,
+ keys[level], &new_le);
+ if (r)
+ goto bad;
+ }
+
+ if (level < last_level)
+ block = value64(n, index);
+ }
+
+ r = btree_insert_raw(&spine, block, &info->value_type,
+ keys[level], &index);
+ if (r < 0)
+ goto bad;
+
+ n = dm_block_data(shadow_current(&spine));
+ need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[index]) != keys[level]));
+
+ if (need_insert) {
+ if (inserted)
+ *inserted = 1;
+
+ r = insert_at(info->value_type.size, n, index,
+ keys[level], value);
+ if (r)
+ goto bad_unblessed;
+ } else {
+ if (inserted)
+ *inserted = 0;
+
+ if (info->value_type.dec &&
+ (!info->value_type.equal ||
+ !info->value_type.equal(
+ info->value_type.context,
+ value_ptr(n, index, info->value_type.size),
+ value))) {
+ info->value_type.dec(info->value_type.context,
+ value_ptr(n, index, info->value_type.size));
+ }
+ memcpy_disk(value_ptr(n, index, info->value_type.size),
+ value, info->value_type.size);
+ }
+
+ *new_root = shadow_root(&spine);
+ exit_shadow_spine(&spine);
+
+ return 0;
+
+bad:
+ __dm_unbless_for_disk(value);
+bad_unblessed:
+ exit_shadow_spine(&spine);
+ return r;
+}
+
+int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ return insert(info, root, keys, value, new_root, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert);
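A sketch of the blessing discipline a caller follows, assuming a single-level tree with __le64 values (the same pattern appears in disk_ll_extend() in dm-space-map-disk.c): the value is blessed before the call because insert() unblesses it on the failure path. The wrapper itself is illustrative.

static int example_insert(struct dm_btree_info *info, dm_block_t root,
                          uint64_t key, uint64_t value, dm_block_t *new_root)
{
        __le64 value_le = cpu_to_le64(value);

        __dm_bless_for_disk(&value_le);
        return dm_btree_insert(info, root, &key, &value_le, new_root);
}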
+
+int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root,
+ int *inserted)
+ __dm_written_to_disk(value)
+{
+ return insert(info, root, keys, value, new_root, inserted);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
+
+/*----------------------------------------------------------------*/
+
+int dm_btree_clone(struct dm_btree_info *info, dm_block_t root,
+ dm_block_t *clone)
+{
+ int r;
+ struct dm_block *b, *orig_b;
+ struct node *b_node, *orig_node;
+
+ /* Copy the root node */
+ r = new_block(info, &b);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_read_lock(info->tm, root, &btree_node_validator, &orig_b);
+ if (r < 0) {
+ dm_block_t location = dm_block_location(b);
+
+ unlock_block(info, b);
+ dm_tm_dec(info->tm, location);
+ return r; /* don't fall through with orig_b unset */
+ }
+
+ *clone = dm_block_location(b);
+ b_node = dm_block_data(b);
+ orig_node = dm_block_data(orig_b);
+
+ memcpy(b_node, orig_node,
+ dm_bm_block_size(dm_tm_get_bm(info->tm)));
+ dm_tm_unlock(info->tm, orig_b);
+ inc_children(info->tm, b_node, &info->value_type);
+ dm_tm_unlock(info->tm, b);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_btree_clone);
+
+/*----------------------------------------------------------------*/
+
+static int find_highest_key(struct ro_spine *s, dm_block_t block,
+ uint64_t *result_key, dm_block_t *next_block)
+{
+ int i, r;
+ uint32_t flags;
+
+ do {
+ r = ro_step(s, block);
+ if (r < 0)
+ return r;
+
+ flags = le32_to_cpu(ro_node(s)->header.flags);
+ i = le32_to_cpu(ro_node(s)->header.nr_entries);
+ if (!i)
+ return -ENODATA;
+ else
+ i--;
+
+ *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ if (next_block || flags & INTERNAL_NODE)
+ block = value64(ro_node(s), i);
+
+ } while (flags & INTERNAL_NODE);
+
+ if (next_block)
+ *next_block = block;
+ return 0;
+}
+
+int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *result_keys)
+{
+ int r = 0, count = 0, level;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = find_highest_key(&spine, root, result_keys + level,
+ level == info->levels - 1 ? NULL : &root);
+ if (r == -ENODATA) {
+ r = 0;
+ break;
+
+ } else if (r)
+ break;
+
+ count++;
+ }
+ exit_ro_spine(&spine);
+
+ return r ? r : count;
+}
+EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
--- /dev/null
-#include <linux/module.h>
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-space-map-common.h"
+#include "dm-space-map-disk.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "space map disk"
+
+/*
+ * Bitmap validator
+ */
+static void bitmap_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_bitmap_header *disk_header = dm_block_data(b);
+
+ disk_header->blocknr = cpu_to_le64(dm_block_location(b));
+ disk_header->csum = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32)));
+}
+
+static int bitmap_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_bitmap_header *disk_header = dm_block_data(b);
+ __le32 csum_disk;
+
+ if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
+ DMERR("bitmap check failed blocknr %llu wanted %llu",
+ le64_to_cpu(disk_header->blocknr), dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ csum_disk = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32)));
+ if (csum_disk != disk_header->csum) {
+ DMERR("bitmap check failed csum %u wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+struct dm_block_validator dm_sm_bitmap_validator = {
+ .name = "sm_bitmap",
+ .prepare_for_write = bitmap_prepare_for_write,
+ .check = bitmap_check
+};
+
+/*----------------------------------------------------------------*/
+
+#define ENTRIES_PER_WORD 32
+#define ENTRIES_SHIFT 5
+
+void *dm_bitmap_data(struct dm_block *b)
+{
+ return dm_block_data(b) + sizeof(struct disk_bitmap_header);
+}
+
+#define WORD_MASK_LOW 0x5555555555555555ULL
+#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
+#define WORD_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
+
+static unsigned bitmap_word_used(void *addr, unsigned b)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ uint64_t bits = le64_to_cpu(*w_le);
+
+ return ((bits & WORD_MASK_LOW) == WORD_MASK_LOW ||
+ (bits & WORD_MASK_HIGH) == WORD_MASK_HIGH ||
+ (bits & WORD_MASK_ALL) == WORD_MASK_ALL);
+}
+
+unsigned sm_lookup_bitmap(void *addr, unsigned b)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+
+ return (!!test_bit_le(b, (void *) w_le) << 1) |
+ (!!test_bit_le(b + 1, (void *) w_le));
+}
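To restate the packing explicitly (nothing new here): each block gets a two-bit reference count, 32 counts per 64-bit word, so entry b lives in word b >> 5 at little-endian bit offset (b & 31) << 1, with the first bit holding the high bit of the count. Counts 0-2 are stored literally; 3 is an escape meaning "the real count lives in the ref-count btree", which is why disk_ll_lookup() below falls through to a btree lookup when it reads 3.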
+
+void sm_set_bitmap(void *addr, unsigned b, unsigned val)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+
+ if (val & 2)
+ __set_bit_le(b, (void *) w_le);
+ else
+ __clear_bit_le(b, (void *) w_le);
+
+ if (val & 1)
+ __set_bit_le(b + 1, (void *) w_le);
+ else
+ __clear_bit_le(b + 1, (void *) w_le);
+}
+
+int sm_find_free(void *addr, unsigned begin, unsigned end,
+ unsigned *result)
+{
+ while (begin < end) {
+ if (!(begin & (ENTRIES_PER_WORD - 1)) &&
+ bitmap_word_used(addr, begin)) {
+ begin += ENTRIES_PER_WORD;
+ continue;
+ }
+
+ if (!sm_lookup_bitmap(addr, begin)) {
+ *result = begin;
+ return 0;
+ }
+
+ begin++;
+ }
+
+ return -ENOSPC;
+}
+
+static int disk_ll_init(struct ll_disk *io, struct dm_transaction_manager *tm)
+{
+ io->tm = tm;
+ io->bitmap_info.tm = tm;
+ io->bitmap_info.levels = 1;
+
+ /*
+ * Because the new bitmap blocks are created via a shadow
+ * operation, the old entry has already had its reference count
+ * decremented and we don't need the btree to do any bookkeeping.
+ */
+ io->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
+ io->bitmap_info.value_type.inc = NULL;
+ io->bitmap_info.value_type.dec = NULL;
+ io->bitmap_info.value_type.equal = NULL;
+
+ io->ref_count_info.tm = tm;
+ io->ref_count_info.levels = 1;
+ io->ref_count_info.value_type.size = sizeof(uint32_t);
+ io->ref_count_info.value_type.inc = NULL;
+ io->ref_count_info.value_type.dec = NULL;
+ io->ref_count_info.value_type.equal = NULL;
+
+ io->block_size = dm_bm_block_size(dm_tm_get_bm(tm));
+
+ if (io->block_size > (1 << 30)) {
+ DMERR("block size too big to hold bitmaps");
+ return -EINVAL;
+ }
+
+ io->entries_per_block = (io->block_size - sizeof(struct disk_bitmap_header)) *
+ ENTRIES_PER_BYTE;
+ io->nr_blocks = 0;
+ io->bitmap_root = 0;
+ io->ref_count_root = 0;
+
+ return 0;
+}
+
+static int disk_ll_new(struct ll_disk *io, struct dm_transaction_manager *tm)
+{
+ int r;
+
+ r = disk_ll_init(io, tm);
+ if (r < 0)
+ return r;
+
+ io->nr_blocks = 0;
+ io->nr_allocated = 0;
+ r = dm_btree_create(&io->bitmap_info, &io->bitmap_root);
+ if (r < 0)
+ return r;
+
+ r = dm_btree_create(&io->ref_count_info, &io->ref_count_root);
+ if (r < 0) {
+ dm_btree_destroy(&io->bitmap_info, io->bitmap_root);
+ return r;
+ }
+
+ return 0;
+}
+
+static int disk_ll_extend(struct ll_disk *io, dm_block_t extra_blocks)
+{
+ int r;
+ dm_block_t i, nr_blocks;
+ unsigned old_blocks, blocks;
+
+ nr_blocks = io->nr_blocks + extra_blocks;
+ old_blocks = dm_sector_div_up(io->nr_blocks, io->entries_per_block);
+ blocks = dm_sector_div_up(nr_blocks, io->entries_per_block);
+
+ for (i = old_blocks; i < blocks; i++) {
+ struct dm_block *b;
+ struct disk_index_entry idx;
+
+ r = dm_tm_new_block(io->tm, &dm_sm_bitmap_validator, &b);
+ if (r < 0)
+ return r;
+ idx.blocknr = cpu_to_le64(dm_block_location(b));
+
+ r = dm_tm_unlock(io->tm, b);
+ if (r < 0)
+ return r;
+
+ idx.nr_free = cpu_to_le32(io->entries_per_block);
+ idx.none_free_before = 0;
+ __dm_bless_for_disk(&idx);
+
+ r = dm_btree_insert(&io->bitmap_info, io->bitmap_root,
+ &i, &idx, &io->bitmap_root);
+ if (r < 0)
+ return r;
+ }
+
+ io->nr_blocks = nr_blocks;
+ return 0;
+}
+
+static int disk_ll_open(struct ll_disk *ll, struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct disk_sm_root *smr = root_le;
+
+ if (len < sizeof(struct disk_sm_root)) {
+ DMERR("sm_disk root too small");
+ return -ENOMEM;
+ }
+
+ r = disk_ll_init(ll, tm);
+ if (r < 0)
+ return r;
+
+ ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
+ ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
+ ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
+ ll->ref_count_root = le64_to_cpu(smr->ref_count_root);
+
+ return 0;
+}
+
+static int disk_ll_lookup_bitmap(struct ll_disk *io, dm_block_t b, uint32_t *result)
+{
+ int r;
+ dm_block_t index = b;
+ struct disk_index_entry ie_disk;
+ struct dm_block *blk;
+
+ do_div(index, io->entries_per_block);
+ r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr), &dm_sm_bitmap_validator, &blk);
+ if (r < 0)
+ return r;
+
+ *result = sm_lookup_bitmap(dm_bitmap_data(blk), do_div(b, io->entries_per_block));
+
+ return dm_tm_unlock(io->tm, blk);
+}
+
+static int disk_ll_lookup(struct ll_disk *io, dm_block_t b, uint32_t *result)
+{
+ __le32 rc_le;
+ int r = disk_ll_lookup_bitmap(io, b, result);
+
+ if (r)
+ return r;
+
+ if (*result != 3)
+ return r;
+
+ r = dm_btree_lookup(&io->ref_count_info, io->ref_count_root, &b, &rc_le);
+ if (r < 0)
+ return r;
+
+ *result = le32_to_cpu(rc_le);
+
+ return r;
+}
+
+static int disk_ll_find_free_block(struct ll_disk *io, dm_block_t begin,
+ dm_block_t end, dm_block_t *result)
+{
+ int r;
+ struct disk_index_entry ie_disk;
+ dm_block_t i, index_begin = begin;
+ dm_block_t index_end = dm_sector_div_up(end, io->entries_per_block);
+
+ begin = do_div(index_begin, io->entries_per_block);
+
+ for (i = index_begin; i < index_end; i++, begin = 0) {
+ struct dm_block *blk;
+ unsigned position;
+ uint32_t bit_end;
+
+ r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &i, &ie_disk);
+ if (r < 0)
+ return r;
+
+ if (le32_to_cpu(ie_disk.nr_free) <= 0)
+ continue;
+
+ r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr),
+ &dm_sm_bitmap_validator, &blk);
+ if (r < 0)
+ return r;
+
+ bit_end = (i == index_end - 1) ?
+ do_div(end, io->entries_per_block) : io->entries_per_block;
+
+ r = sm_find_free(dm_bitmap_data(blk),
+ max((unsigned)begin, (unsigned)le32_to_cpu(ie_disk.none_free_before)),
+ bit_end, &position);
+ if (r < 0) {
+ dm_tm_unlock(io->tm, blk);
+ continue;
+ }
+
+ r = dm_tm_unlock(io->tm, blk);
+ if (r < 0)
+ return r;
+
+ *result = i * io->entries_per_block + (dm_block_t) position;
+
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static int disk_ll_insert(struct ll_disk *io, dm_block_t b, uint32_t ref_count)
+{
+ int r;
+ uint32_t bit, old;
+ struct dm_block *nb;
+ dm_block_t index = b;
+ struct disk_index_entry ie_disk;
+ void *bm_le;
+ int inc;
+
+ do_div(index, io->entries_per_block);
+ r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_shadow_block(io->tm, le64_to_cpu(ie_disk.blocknr),
+ &dm_sm_bitmap_validator, &nb, &inc);
+ if (r < 0) {
+ DMERR("dm_tm_shadow_block() failed");
+ return r;
+ }
+ ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));
+
+ bm_le = dm_bitmap_data(nb);
+ bit = do_div(b, io->entries_per_block);
+ old = sm_lookup_bitmap(bm_le, bit);
+
+ if (ref_count <= 2) {
+ sm_set_bitmap(bm_le, bit, ref_count);
+
+ if (old > 2) {
+ r = dm_btree_remove(&io->ref_count_info, io->ref_count_root,
+ &b, &io->ref_count_root);
+ if (r) {
+ dm_tm_unlock(io->tm, nb);
+ return r;
+ }
+ }
+ } else {
+ __le32 rc_le = cpu_to_le32(ref_count);
+
+ __dm_bless_for_disk(&rc_le);
+
+ sm_set_bitmap(bm_le, bit, 3);
+ r = dm_btree_insert(&io->ref_count_info, io->ref_count_root,
+ &b, &rc_le, &io->ref_count_root);
+ if (r < 0) {
+ dm_tm_unlock(io->tm, nb);
+ DMERR("ref count insert failed");
+ return r;
+ }
+ }
+
+ r = dm_tm_unlock(io->tm, nb);
+ if (r < 0)
+ return r;
+
+ if (ref_count && !old) {
+ io->nr_allocated++;
+ ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1);
+ if (le32_to_cpu(ie_disk.none_free_before) == b)
+ ie_disk.none_free_before = cpu_to_le32(b + 1);
+
+ } else if (old && !ref_count) {
+ io->nr_allocated--;
+ ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1);
+ ie_disk.none_free_before = cpu_to_le32(min((dm_block_t) le32_to_cpu(ie_disk.none_free_before), b));
+ }
+
+ __dm_bless_for_disk(&ie_disk);
+
+ r = dm_btree_insert(&io->bitmap_info, io->bitmap_root, &index, &ie_disk, &io->bitmap_root);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int disk_ll_inc(struct ll_disk *ll, dm_block_t b)
+{
+ int r;
+ uint32_t rc;
+
+ r = disk_ll_lookup(ll, b, &rc);
+ if (r)
+ return r;
+
+ return disk_ll_insert(ll, b, rc + 1);
+}
+
+static int disk_ll_dec(struct ll_disk *ll, dm_block_t b)
+{
+ int r;
+ uint32_t rc;
+
+ r = disk_ll_lookup(ll, b, &rc);
+ if (r)
+ return r;
+
+ if (!rc)
+ return -EINVAL;
+
+ return disk_ll_insert(ll, b, rc - 1);
+}
+
+/*--------------------------------------------------------------*/
+
+/*
+ * Space map interface.
+ */
+struct sm_disk {
+ struct dm_space_map sm;
+
+ struct ll_disk ll;
+};
+
+static void sm_disk_destroy(struct dm_space_map *sm)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ kfree(smd);
+}
+
+static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_extend(&smd->ll, extra_blocks);
+}
+
+static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ *count = smd->ll.nr_blocks;
+
+ return 0;
+}
+
+static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ *count = smd->ll.nr_blocks - smd->ll.nr_allocated;
+
+ return 0;
+}
+
+static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t *result)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_lookup(&smd->ll, b, result);
+}
+
+static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
+ int *result)
+{
+ int r;
+ uint32_t count;
+
+ r = sm_disk_get_count(sm, b, &count);
+ if (r)
+ return r;
+
+ *result = count > 1;
+
+ return 0;
+}
+
+static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_insert(&smd->ll, b, count);
+}
+
+static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_inc(&smd->ll, b);
+}
+
+static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_dec(&smd->ll, b);
+}
+
+static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
+{
+ int r;
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ /*
+ * FIXME: We should start the search where we left off.
+ */
+ r = disk_ll_find_free_block(&smd->ll, 0, smd->ll.nr_blocks, b);
+ if (r)
+ return r;
+
+ return disk_ll_inc(&smd->ll, *b);
+}
+
+static int sm_disk_commit(struct dm_space_map *sm)
+{
+ return 0;
+}
+
+static int sm_disk_root_size(struct dm_space_map *sm, size_t *result)
+{
+ *result = sizeof(struct disk_sm_root);
+
+ return 0;
+}
+
+static int sm_disk_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+ struct disk_sm_root root_le;
+
+ root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks);
+ root_le.nr_allocated = cpu_to_le64(smd->ll.nr_allocated);
+ root_le.bitmap_root = cpu_to_le64(smd->ll.bitmap_root);
+ root_le.ref_count_root = cpu_to_le64(smd->ll.ref_count_root);
+
+ if (max < sizeof(root_le))
+ return -ENOSPC;
+
+ memcpy(where_le, &root_le, sizeof(root_le));
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_space_map ops = {
+ .destroy = sm_disk_destroy,
+ .extend = sm_disk_extend,
+ .get_nr_blocks = sm_disk_get_nr_blocks,
+ .get_nr_free = sm_disk_get_nr_free,
+ .get_count = sm_disk_get_count,
+ .count_is_more_than_one = sm_disk_count_is_more_than_one,
+ .set_count = sm_disk_set_count,
+ .inc_block = sm_disk_inc_block,
+ .dec_block = sm_disk_dec_block,
+ .new_block = sm_disk_new_block,
+ .commit = sm_disk_commit,
+ .root_size = sm_disk_root_size,
+ .copy_root = sm_disk_copy_root
+};
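+
+/*
+ * Minimal usage sketch (illustrative only, error handling elided):
+ *
+ *	struct dm_space_map *sm = dm_sm_disk_create(tm, nr_blocks);
+ *	dm_block_t b;
+ *
+ *	dm_sm_new_block(sm, &b);	allocate b with refcount 1
+ *	dm_sm_inc_block(sm, b);		share it, refcount 2
+ *	dm_sm_dec_block(sm, b);		drop one reference
+ *	dm_sm_destroy(sm);
+ */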
+
+struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks)
+{
+ int r;
+ struct sm_disk *smd;
+
+ smd = kmalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smd->sm, &ops, sizeof(smd->sm));
+
+ r = disk_ll_new(&smd->ll, tm);
+ if (r)
+ goto bad;
+
+ r = disk_ll_extend(&smd->ll, nr_blocks);
+ if (r)
+ goto bad;
+
+ r = sm_disk_commit(&smd->sm);
+ if (r)
+ goto bad;
+
+ return &smd->sm;
+
+bad:
+ kfree(smd);
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL_GPL(dm_sm_disk_create);
+
+struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct sm_disk *smd;
+
+ smd = kmalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smd->sm, &ops, sizeof(smd->sm));
+
+ r = disk_ll_open(&smd->ll, tm, root_le, len);
+ if (r)
+ goto bad;
+
+ r = sm_disk_commit(&smd->sm);
+ if (r)
+ goto bad;
+
+ return &smd->sm;
+
+bad:
+ kfree(smd);
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL_GPL(dm_sm_disk_open);
--- /dev/null
- #include <linux/module.h>
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+#include "dm-transaction-manager.h"
+#include "dm-space-map.h"
+#include "dm-space-map-disk.h"
+#include "dm-space-map-metadata.h"
+#include "dm-persistent-data-internal.h"
+
+#include <linux/slab.h>
++#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "transaction manager"
+
+/*----------------------------------------------------------------*/
+
+struct shadow_info {
+ struct hlist_node hlist;
+ dm_block_t where;
+};
+
+/*
+ * It would be nice if this scaled with the size of the transaction.
+ */
+#define HASH_SIZE 256
+#define HASH_MASK (HASH_SIZE - 1)
+
+struct dm_transaction_manager {
+ int is_clone;
+ struct dm_transaction_manager *real;
+
+ struct dm_block_manager *bm;
+ struct dm_space_map *sm;
+
+ spinlock_t lock;
+ struct hlist_head buckets[HASH_SIZE];
+};
+
+/*----------------------------------------------------------------*/
+
+static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ unsigned bucket = dm_hash_block(b, HASH_MASK);
+ struct shadow_info *si;
+ struct hlist_node *n;
+ int r = 0;
+
+ spin_lock(&tm->lock);
+
+ hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+ if (si->where == b) {
+ r = 1;
+ break;
+ }
+
+ spin_unlock(&tm->lock);
+
+ return r;
+}
+
+/*
+ * This can silently fail if there's no memory. We're ok with this since
+ * creating redundant shadows causes no harm.
+ */
+static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ unsigned bucket;
+ struct shadow_info *si;
+
+ si = kmalloc(sizeof(*si), GFP_NOIO);
+ if (si) {
+ si->where = b;
+ bucket = dm_hash_block(b, HASH_MASK);
+
+ spin_lock(&tm->lock);
+ hlist_add_head(&si->hlist, tm->buckets + bucket);
+ spin_unlock(&tm->lock);
+ }
+}
+
+static void wipe_shadow_table(struct dm_transaction_manager *tm)
+{
+ struct shadow_info *si;
+ struct hlist_node *n, *tmp;
+ struct hlist_head *bucket;
+ int i;
+
+ spin_lock(&tm->lock);
+ for (i = 0; i < HASH_SIZE; i++) {
+ bucket = tm->buckets + i;
+ hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+ kfree(si);
+
+ INIT_HLIST_HEAD(bucket);
+ }
+ spin_unlock(&tm->lock);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
+ struct dm_space_map *sm)
+{
+ int i;
+ struct dm_transaction_manager *tm;
+
+ tm = kmalloc(sizeof(*tm), GFP_KERNEL);
+ if (!tm)
+ return ERR_PTR(-ENOMEM);
+
+ tm->is_clone = 0;
+ tm->real = NULL;
+ tm->bm = bm;
+ tm->sm = sm;
+
+ spin_lock_init(&tm->lock);
+ for (i = 0; i < HASH_SIZE; i++)
+ INIT_HLIST_HEAD(tm->buckets + i);
+
+ return tm;
+}
+
+struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
+{
+ struct dm_transaction_manager *tm;
+
+ tm = kmalloc(sizeof(*tm), GFP_KERNEL);
+ if (tm) {
+ tm->is_clone = 1;
+ tm->real = real;
+ }
+
+ return tm;
+}
+EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
+
+void dm_tm_destroy(struct dm_transaction_manager *tm)
+{
+ kfree(tm);
+}
+EXPORT_SYMBOL_GPL(dm_tm_destroy);
+
+int dm_tm_pre_commit(struct dm_transaction_manager *tm)
+{
+ int r;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ r = dm_sm_commit(tm->sm);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
+
+int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
+{
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ wipe_shadow_table(tm);
+
+ return dm_bm_flush_and_unlock(tm->bm, root);
+}
+EXPORT_SYMBOL_GPL(dm_tm_commit);
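+
+/*
+ * Commit protocol, as a sketch: callers run dm_tm_pre_commit() to
+ * flush the space map, then dm_tm_commit() with the superblock still
+ * write-locked. Wiping the shadow table means the next transaction
+ * starts with no blocks considered already shadowed.
+ */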
+
+int dm_tm_new_block(struct dm_transaction_manager *tm,
+ struct dm_block_validator *v,
+ struct dm_block **result)
+{
+ int r;
+ dm_block_t new_block;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ r = dm_sm_new_block(tm->sm, &new_block);
+ if (r < 0)
+ return r;
+
+ r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
+ if (r < 0) {
+ dm_sm_dec_block(tm->sm, new_block);
+ return r;
+ }
+
+ /*
+ * New blocks count as shadows in that they don't need to be
+ * shadowed again.
+ */
+ insert_shadow(tm, new_block);
+
+ return 0;
+}
+
+static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
+ struct dm_block_validator *v,
+ struct dm_block **result, int *inc_children)
+{
+ int r;
+ dm_block_t new;
+ uint32_t count;
+ struct dm_block *orig_block;
+
+ r = dm_sm_new_block(tm->sm, &new);
+ if (r < 0)
+ return r;
+
+ r = dm_bm_write_lock_zero(tm->bm, new, v, result);
+ if (r < 0)
+ goto bad_dec_block;
+
+ r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
+ if (r < 0)
+ goto bad_dec_block;
+
+ memcpy(dm_block_data(*result), dm_block_data(orig_block),
+ dm_bm_block_size(tm->bm));
+
+ r = dm_bm_unlock(orig_block);
+ if (r < 0)
+ goto bad_dec_block;
+
+ r = dm_sm_get_count(tm->sm, orig, &count);
+ if (r < 0)
+ goto bad;
+
+ r = dm_sm_dec_block(tm->sm, orig);
+ if (r < 0)
+ goto bad;
+
+ *inc_children = count > 1;
+
+ return 0;
+
+bad:
+ dm_bm_unlock(*result);
+bad_dec_block:
+ dm_sm_dec_block(tm->sm, new);
+
+ return r;
+}
+
+int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
+ struct dm_block_validator *v, struct dm_block **result,
+ int *inc_children)
+{
+ int r, more_than_one;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ if (is_shadow(tm, orig)) {
+ r = dm_sm_count_is_more_than_one(tm->sm, orig, &more_than_one);
+ if (r < 0)
+ return r;
+
+ if (!more_than_one) {
+ *inc_children = 0;
+ return dm_bm_write_lock(tm->bm, orig, v, result);
+ }
+ /* fall through */
+ }
+
+ r = __shadow_block(tm, orig, v, result, inc_children);
+ if (r < 0)
+ return r;
+
+ insert_shadow(tm, dm_block_location(*result));
+
+ return r;
+}
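+
+/*
+ * Typical copy-on-write use, as a sketch (error handling elided):
+ *
+ *	int inc;
+ *	struct dm_block *b;
+ *
+ *	r = dm_tm_shadow_block(tm, orig, v, &b, &inc);
+ *	if (inc)
+ *		dm_tm_inc() every block referenced from b's data;
+ *	modify dm_block_data(b), then dm_tm_unlock(tm, b);
+ */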
+
+int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **blk)
+{
+ if (tm->is_clone)
+ return dm_bm_read_try_lock(tm->real->bm, b, v, blk);
+
+ return dm_bm_read_lock(tm->bm, b, v, blk);
+}
+
+int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
+{
+ return dm_bm_unlock(b);
+}
+EXPORT_SYMBOL_GPL(dm_tm_unlock);
+
+void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ /*
+ * The non-blocking clone doesn't support this.
+ */
+ BUG_ON(tm->is_clone);
+
+ dm_sm_inc_block(tm->sm, b);
+}
+EXPORT_SYMBOL_GPL(dm_tm_inc);
+
+void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ /*
+ * The non-blocking clone doesn't support this.
+ */
+ BUG_ON(tm->is_clone);
+
+ dm_sm_dec_block(tm->sm, b);
+}
+
+int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
+ uint32_t *result)
+{
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ return dm_sm_get_count(tm->sm, b, result);
+}
+
+struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
+{
+ return tm->bm;
+}
+
+/*----------------------------------------------------------------*/
+
+static int dm_tm_create_internal(struct dm_block_manager *bm,
+ dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ size_t root_offset, size_t root_max_len,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm,
+ struct dm_block **sblock,
+ int create)
+{
+ int r;
+
+ *sm = dm_sm_metadata_init();
+ if (IS_ERR(*sm))
+ return PTR_ERR(*sm);
+
+ *tm = dm_tm_create(bm, *sm);
+ if (IS_ERR(*tm)) {
+ dm_sm_destroy(*sm);
+ return PTR_ERR(*tm);
+ }
+
+ if (create) {
+ r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location,
+ sb_validator, sblock);
+ if (r < 0) {
+ DMERR("couldn't lock superblock");
+ goto bad1;
+ }
+
+ r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
+ sb_location);
+ if (r) {
+ DMERR("couldn't create metadata space map");
+ goto bad2;
+ }
+
+ } else {
+ r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
+ sb_validator, sblock);
+ if (r < 0) {
+ DMERR("couldn't lock superblock");
+ goto bad1;
+ }
+
+ r = dm_sm_metadata_open(*sm, *tm,
+ dm_block_data(*sblock) + root_offset,
+ root_max_len);
+ if (r) {
+ DMERR("couldn't open metadata space map");
+ goto bad2;
+ }
+ }
+
+ return 0;
+
+bad2:
+ dm_tm_unlock(*tm, *sblock);
+bad1:
+ dm_tm_destroy(*tm);
+ dm_sm_destroy(*sm);
+ return r;
+}
+
+int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock)
+{
+ return dm_tm_create_internal(bm, sb_location, sb_validator,
+ 0, 0, tm, sm, sblock, 1);
+}
+EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
+
+int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ size_t root_offset, size_t root_max_len,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock)
+{
+ return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset,
+ root_max_len, tm, sm, sblock, 0);
+}
+EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
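+
+/*
+ * Usage sketch (names here are illustrative): a metadata layer formats
+ * a fresh device with dm_tm_create_with_sm() and reopens an existing
+ * one with dm_tm_open_with_sm(), passing the offset of the space map
+ * root within the superblock:
+ *
+ *	r = dm_tm_create_with_sm(bm, SUPERBLOCK_LOCATION, &sb_validator,
+ *				 &tm, &sm, &sblock);
+ */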
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
+ #include <linux/module.h>
-enum max17042_register {
- MAX17042_STATUS = 0x00,
- MAX17042_VALRT_Th = 0x01,
- MAX17042_TALRT_Th = 0x02,
- MAX17042_SALRT_Th = 0x03,
- MAX17042_AtRate = 0x04,
- MAX17042_RepCap = 0x05,
- MAX17042_RepSOC = 0x06,
- MAX17042_Age = 0x07,
- MAX17042_TEMP = 0x08,
- MAX17042_VCELL = 0x09,
- MAX17042_Current = 0x0A,
- MAX17042_AvgCurrent = 0x0B,
- MAX17042_Qresidual = 0x0C,
- MAX17042_SOC = 0x0D,
- MAX17042_AvSOC = 0x0E,
- MAX17042_RemCap = 0x0F,
- MAX17402_FullCAP = 0x10,
- MAX17042_TTE = 0x11,
- MAX17042_V_empty = 0x12,
-
- MAX17042_RSLOW = 0x14,
-
- MAX17042_AvgTA = 0x16,
- MAX17042_Cycles = 0x17,
- MAX17042_DesignCap = 0x18,
- MAX17042_AvgVCELL = 0x19,
- MAX17042_MinMaxTemp = 0x1A,
- MAX17042_MinMaxVolt = 0x1B,
- MAX17042_MinMaxCurr = 0x1C,
- MAX17042_CONFIG = 0x1D,
- MAX17042_ICHGTerm = 0x1E,
- MAX17042_AvCap = 0x1F,
- MAX17042_ManName = 0x20,
- MAX17042_DevName = 0x21,
- MAX17042_DevChem = 0x22,
-
- MAX17042_TempNom = 0x24,
- MAX17042_TempCold = 0x25,
- MAX17042_TempHot = 0x26,
- MAX17042_AIN = 0x27,
- MAX17042_LearnCFG = 0x28,
- MAX17042_SHFTCFG = 0x29,
- MAX17042_RelaxCFG = 0x2A,
- MAX17042_MiscCFG = 0x2B,
- MAX17042_TGAIN = 0x2C,
- MAx17042_TOFF = 0x2D,
- MAX17042_CGAIN = 0x2E,
- MAX17042_COFF = 0x2F,
-
- MAX17042_Q_empty = 0x33,
- MAX17042_T_empty = 0x34,
-
- MAX17042_RCOMP0 = 0x38,
- MAX17042_TempCo = 0x39,
- MAX17042_Rx = 0x3A,
- MAX17042_T_empty0 = 0x3B,
- MAX17042_TaskPeriod = 0x3C,
- MAX17042_FSTAT = 0x3D,
-
- MAX17042_SHDNTIMER = 0x3F,
-
- MAX17042_VFRemCap = 0x4A,
-
- MAX17042_QH = 0x4D,
- MAX17042_QL = 0x4E,
-};
-
struct max17042_chip {
struct i2c_client *client;
struct power_supply battery;
#include <linux/power_supply.h>
#include <linux/platform_device.h>
#include <linux/power/max8903_charger.h>
+ #include <linux/module.h>
struct max8903_data {
- struct max8903_pdata *pdata;
+ struct max8903_pdata pdata;
struct device *dev;
struct power_supply psy;
bool fault;
--- /dev/null
+/*
+ * max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
++#include <linux/module.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+struct charger_data {
+ struct device *dev;
+ struct max8997_dev *iodev;
+ struct power_supply battery;
+};
+
+static enum power_supply_property max8997_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS, /* "FULL" or "NOT FULL" only. */
+ POWER_SUPPLY_PROP_PRESENT, /* whether a battery is present */
+ POWER_SUPPLY_PROP_ONLINE, /* whether the charger is online */
+};
+
+/* Note that the charger control is done by a current regulator "CHARGER" */
+static int max8997_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct charger_data *charger = container_of(psy,
+ struct charger_data, battery);
+ struct i2c_client *i2c = charger->iodev->i2c;
+ int ret;
+ u8 reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ if ((reg & (1 << 0)) == 0x1)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ if ((reg & (1 << 2)) == 0x0)
+ val->intval = 1;
+
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ /* DCINOK */
+ if (reg & (1 << 1))
+ val->intval = 1;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
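+
+/*
+ * Once power_supply_register() succeeds, the core exposes these
+ * properties via sysfs, e.g. (illustrative path):
+ *
+ *	/sys/class/power_supply/max8997_pmic/online
+ */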
+
+static __devinit int max8997_battery_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct charger_data *charger;
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (pdata->eoc_mA) {
+ /* signed: the clamping below relies on negative intermediates */
+ int val = (pdata->eoc_mA - 50) / 10;
+ if (val < 0)
+ val = 0;
+ if (val > 0xf)
+ val = 0xf;
+
+ ret = max8997_update_reg(iodev->i2c,
+ MAX8997_REG_MBCCTRL5, val, 0xf);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot use i2c bus.\n");
+ return ret;
+ }
+ }
+
+ switch (pdata->timeout) {
+ case 5:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x2 << 4, 0x7 << 4);
+ break;
+ case 6:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x3 << 4, 0x7 << 4);
+ break;
+ case 7:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x4 << 4, 0x7 << 4);
+ break;
+ case 0:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x7 << 4, 0x7 << 4);
+ break;
+ default:
+ dev_err(&pdev->dev, "incorrect timeout value (%d)\n",
+ pdata->timeout);
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot use i2c bus.\n");
+ return ret;
+ }
+
+ charger = kzalloc(sizeof(struct charger_data), GFP_KERNEL);
+ if (charger == NULL) {
+ dev_err(&pdev->dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, charger);
+
+ charger->battery.name = "max8997_pmic";
+ charger->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ charger->battery.get_property = max8997_battery_get_property;
+ charger->battery.properties = max8997_battery_props;
+ charger->battery.num_properties = ARRAY_SIZE(max8997_battery_props);
+
+ charger->dev = &pdev->dev;
+ charger->iodev = iodev;
+
+ ret = power_supply_register(&pdev->dev, &charger->battery);
+ if (ret) {
+ dev_err(&pdev->dev, "failed: power supply register\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(charger);
+ return ret;
+}
+
+static int __devexit max8997_battery_remove(struct platform_device *pdev)
+{
+ struct charger_data *charger = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&charger->battery);
+ kfree(charger);
+ return 0;
+}
+
+static const struct platform_device_id max8997_battery_id[] = {
+ { "max8997-battery", 0 },
+};
+
+static struct platform_driver max8997_battery_driver = {
+ .driver = {
+ .name = "max8997-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_battery_probe,
+ .remove = __devexit_p(max8997_battery_remove),
+ .id_table = max8997_battery_id,
+};
+
+static int __init max8997_battery_init(void)
+{
+ return platform_driver_register(&max8997_battery_driver);
+}
+subsys_initcall(max8997_battery_init);
+
+static void __exit max8997_battery_cleanup(void)
+{
+ platform_driver_unregister(&max8997_battery_driver);
+}
+module_exit(max8997_battery_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8997/8966 battery control driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
+ *
+ * Copyright (C) 2009-2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
++#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/max8998.h>
+#include <linux/mfd/max8998-private.h>
+
+struct max8998_battery_data {
+ struct device *dev;
+ struct max8998_dev *iodev;
+ struct power_supply battery;
+};
+
+static enum power_supply_property max8998_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT, /* whether a battery is present */
+ POWER_SUPPLY_PROP_ONLINE, /* whether the charger is online */
+};
+
+/* Note that the charger control is done by a current regulator "CHARGER" */
+static int max8998_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max8998_battery_data *max8998 = container_of(psy,
+ struct max8998_battery_data, battery);
+ struct i2c_client *i2c = max8998->iodev->i2c;
+ int ret;
+ u8 reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
+ if (ret)
+ return ret;
+ if (reg & (1 << 4))
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
+ if (ret)
+ return ret;
+ if (reg & (1 << 3))
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static __devinit int max8998_battery_probe(struct platform_device *pdev)
+{
+ struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8998_battery_data *max8998;
+ struct i2c_client *i2c;
+ int ret = 0;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "No platform init data supplied\n");
+ return -ENODEV;
+ }
+
+ max8998 = kzalloc(sizeof(struct max8998_battery_data), GFP_KERNEL);
+ if (!max8998)
+ return -ENOMEM;
+
+ max8998->dev = &pdev->dev;
+ max8998->iodev = iodev;
+ platform_set_drvdata(pdev, max8998);
+ i2c = max8998->iodev->i2c;
+
+ /*
+ * Set up "End of Charge". An EOC value of 0 keeps whatever the
+ * bootloader (or the hardware default) programmed.
+ */
+ if (pdata->eoc >= 10 && pdata->eoc <= 45) {
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1,
+ (pdata->eoc / 5 - 2) << 5, 0x7 << 5);
+ } else if (pdata->eoc == 0) {
+ dev_dbg(max8998->dev,
+ "EOC value not set: leave it unchanged.\n");
+ } else {
+ dev_err(max8998->dev, "Invalid EOC value\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Setup Charge Restart Level */
+ switch (pdata->restart) {
+ case 100:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x1 << 3, 0x3 << 3);
+ break;
+ case 150:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x0 << 3, 0x3 << 3);
+ break;
+ case 200:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x2 << 3, 0x3 << 3);
+ break;
+ case -1:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x3 << 3, 0x3 << 3);
+ break;
+ case 0:
+ dev_dbg(max8998->dev,
+ "Restart Level not set: leave it unchanged.\n");
+ break;
+ default:
+ dev_err(max8998->dev, "Invalid Restart Level\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Setup Charge Full Timeout */
+ switch (pdata->timeout) {
+ case 5:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x0 << 4, 0x3 << 4);
+ break;
+ case 6:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x1 << 4, 0x3 << 4);
+ break;
+ case 7:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x2 << 4, 0x3 << 4);
+ break;
+ case -1:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x3 << 4, 0x3 << 4);
+ break;
+ case 0:
+ dev_dbg(max8998->dev,
+ "Full Timeout not set: leave it unchanged.\n");
+ break;
+ default:
+ dev_err(max8998->dev, "Invalid Full Timeout value\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ max8998->battery.name = "max8998_pmic";
+ max8998->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ max8998->battery.get_property = max8998_battery_get_property;
+ max8998->battery.properties = max8998_battery_props;
+ max8998->battery.num_properties = ARRAY_SIZE(max8998_battery_props);
+
+ ret = power_supply_register(max8998->dev, &max8998->battery);
+ if (ret) {
+ dev_err(max8998->dev, "failed: power supply register\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(max8998);
+ return ret;
+}
+
+static int __devexit max8998_battery_remove(struct platform_device *pdev)
+{
+ struct max8998_battery_data *max8998 = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&max8998->battery);
+ kfree(max8998);
+
+ return 0;
+}
+
+static const struct platform_device_id max8998_battery_id[] = {
+ { "max8998-battery", TYPE_MAX8998 },
+};
+
+static struct platform_driver max8998_battery_driver = {
+ .driver = {
+ .name = "max8998-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8998_battery_probe,
+ .remove = __devexit_p(max8998_battery_remove),
+ .id_table = max8998_battery_id,
+};
+
+static int __init max8998_battery_init(void)
+{
+ return platform_driver_register(&max8998_battery_driver);
+}
+module_init(max8998_battery_init);
+
+static void __exit max8998_battery_cleanup(void)
+{
+ platform_driver_unregister(&max8998_battery_driver);
+}
+module_exit(max8998_battery_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8998 battery control driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max8998-battery");
--- /dev/null
+/*
+ * linux/drivers/regulator/aat2870-regulator.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
++#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/aat2870.h>
+
+struct aat2870_regulator {
+ struct platform_device *pdev;
+ struct regulator_desc desc;
+
+ const int *voltages; /* uV */
+
+ int min_uV;
+ int max_uV;
+
+ u8 enable_addr;
+ u8 enable_shift;
+ u8 enable_mask;
+
+ u8 voltage_addr;
+ u8 voltage_shift;
+ u8 voltage_mask;
+};
+
+static int aat2870_ldo_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+
+ return ri->voltages[selector];
+}
+
+static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
+ (selector << ri->voltage_shift) & ri->voltage_mask);
+}
+
+static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+ u8 val;
+ int ret;
+
+ ret = aat2870->read(aat2870, ri->voltage_addr, &val);
+ if (ret)
+ return ret;
+
+ return (val & ri->voltage_mask) >> ri->voltage_shift;
+}
+
+static int aat2870_ldo_enable(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask,
+ ri->enable_mask);
+}
+
+static int aat2870_ldo_disable(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask, 0);
+}
+
+static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+ u8 val;
+ int ret;
+
+ ret = aat2870->read(aat2870, ri->enable_addr, &val);
+ if (ret)
+ return ret;
+
+ return val & ri->enable_mask ? 1 : 0;
+}
+
+static struct regulator_ops aat2870_ldo_ops = {
+ .list_voltage = aat2870_ldo_list_voltage,
+ .set_voltage_sel = aat2870_ldo_set_voltage_sel,
+ .get_voltage_sel = aat2870_ldo_get_voltage_sel,
+ .enable = aat2870_ldo_enable,
+ .disable = aat2870_ldo_disable,
+ .is_enabled = aat2870_ldo_is_enabled,
+};
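+
+/*
+ * The regulator core maps a requested range onto a selector by walking
+ * list_voltage() and programs it through set_voltage_sel(), so a
+ * consumer call such as (sketch):
+ *
+ *	regulator_set_voltage(reg, 1800000, 1800000);
+ *
+ * ends up selecting entry 4 (1800000 uV) of the table below.
+ */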
+
+static const int aat2870_ldo_voltages[] = {
+ 1200000, 1300000, 1500000, 1600000,
+ 1800000, 2000000, 2200000, 2500000,
+ 2600000, 2700000, 2800000, 2900000,
+ 3000000, 3100000, 3200000, 3300000,
+};
+
+#define AAT2870_LDO(ids) \
+ { \
+ .desc = { \
+ .name = #ids, \
+ .id = AAT2870_ID_##ids, \
+ .n_voltages = ARRAY_SIZE(aat2870_ldo_voltages), \
+ .ops = &aat2870_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ .voltages = aat2870_ldo_voltages, \
+ .min_uV = 1200000, \
+ .max_uV = 3300000, \
+ }
+
+static struct aat2870_regulator aat2870_regulators[] = {
+ AAT2870_LDO(LDOA),
+ AAT2870_LDO(LDOB),
+ AAT2870_LDO(LDOC),
+ AAT2870_LDO(LDOD),
+};
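+
+/*
+ * The probe below expects struct regulator_init_data as platform data.
+ * A board file might wire up one LDO roughly like this (sketch, names
+ * are illustrative):
+ *
+ *	static struct regulator_init_data ldoa_data = {
+ *		.constraints = {
+ *			.min_uV = 1200000,
+ *			.max_uV = 3300000,
+ *			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ *		},
+ *	};
+ *
+ * attached as the platform_data of a platform device named
+ * "aat2870-regulator" with id AAT2870_ID_LDOA.
+ */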
+
+static struct aat2870_regulator *aat2870_get_regulator(int id)
+{
+ struct aat2870_regulator *ri = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aat2870_regulators); i++) {
+ if (aat2870_regulators[i].desc.id == id) {
+ ri = &aat2870_regulators[i];
+ break;
+ }
+ }
+
+ if (!ri)
+ return NULL;
+
+ ri->enable_addr = AAT2870_LDO_EN;
+ ri->enable_shift = id - AAT2870_ID_LDOA;
+ ri->enable_mask = 0x1 << ri->enable_shift;
+
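+ /*
+ * Two selectors are packed per voltage register: LDOA/LDOB share
+ * AAT2870_LDO_AB and LDOC/LDOD share AAT2870_LDO_CD, with the
+ * first LDO of each pair in the high nibble. LDOC, for example,
+ * gets voltage_addr == AAT2870_LDO_CD and voltage_shift == 4.
+ */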
+ ri->voltage_addr = (id - AAT2870_ID_LDOA) / 2 ?
+ AAT2870_LDO_CD : AAT2870_LDO_AB;
+ ri->voltage_shift = (id - AAT2870_ID_LDOA) % 2 ? 0 : 4;
+ ri->voltage_mask = 0xF << ri->voltage_shift;
+
+ return ri;
+}
+
+static int aat2870_regulator_probe(struct platform_device *pdev)
+{
+ struct aat2870_regulator *ri;
+ struct regulator_dev *rdev;
+
+ ri = aat2870_get_regulator(pdev->id);
+ if (!ri) {
+ dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
+ return -EINVAL;
+ }
+ ri->pdev = pdev;
+
+ rdev = regulator_register(&ri->desc, &pdev->dev,
+ pdev->dev.platform_data, ri);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ ri->desc.name);
+ return PTR_ERR(rdev);
+ }
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static int __devexit aat2870_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver aat2870_regulator_driver = {
+ .driver = {
+ .name = "aat2870-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_regulator_probe,
+ .remove = __devexit_p(aat2870_regulator_remove),
+};
+
+static int __init aat2870_regulator_init(void)
+{
+ return platform_driver_register(&aat2870_regulator_driver);
+}
+subsys_initcall(aat2870_regulator_init);
+
+static void __exit aat2870_regulator_exit(void)
+{
+ platform_driver_unregister(&aat2870_regulator_driver);
+}
+module_exit(aat2870_regulator_exit);
+
+MODULE_DESCRIPTION("AnalogicTech AAT2870 Regulator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
#include <linux/version.h>
#include <linux/stringify.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/slab.h>
+ #include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>
#define DSS_SUBSYS_NAME "DPI"
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/delay.h>
+ #include <linux/export.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
+ #include <linux/export.h>
#include <video/omapdss.h>
-#include <plat/cpu.h>
#include "dss.h"
static struct {
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
+ #include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
-#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>