X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=drivers%2Fdma%2Fdmaengine.c;h=d6dc70fd75270c2522823f798235c8ba703f1ce7;hb=ef38ff9d372d4fe69e415370939a0f1fb5783af1;hp=d59b2f417306f91af453962991c5c681aa20381b;hpb=058250a0d5886b4d96a195ecc7e3a75e2df5e4b1;p=karo-tx-linux.git

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d59b2f417306..d6dc70fd7527 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -41,12 +41,12 @@
  * the definition of dma_event_callback in dmaengine.h.
  *
  * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_get is done for each class_device registered. When the
- * class_device is released, the coresponding kref_put is done in the release
+ * registered. A kref_get is done for each device registered. When the
+ * device is released, the coresponding kref_put is done in the release
  * method. Every time one of the device's channels is allocated to a client,
  * a kref_get occurs. When the channel is freed, the coresponding kref_put
  * happens. The device's release function does a completion, so
- * unregister_device does a remove event, class_device_unregister, a kref_put
+ * unregister_device does a remove event, device_unregister, a kref_put
  * for the first reference, then waits on the completion for all other
  * references to finish.
  *
@@ -77,9 +77,9 @@ static LIST_HEAD(dma_client_list);
 
 /* --- sysfs implementation --- */
 
-static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
+static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	struct dma_chan *chan = to_dma_chan(dev);
 	unsigned long count = 0;
 	int i;
 
@@ -89,9 +89,10 @@ static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
 	return sprintf(buf, "%lu\n", count);
 }
 
-static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
+static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
+				      char *buf)
 {
-	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	struct dma_chan *chan = to_dma_chan(dev);
 	unsigned long count = 0;
 	int i;
 
@@ -101,9 +102,9 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
 	return sprintf(buf, "%lu\n", count);
 }
 
-static ssize_t show_in_use(struct class_device *cd, char *buf)
+static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	struct dma_chan *chan = to_dma_chan(dev);
 	int in_use = 0;
 
 	if (unlikely(chan->slow_ref) &&
@@ -119,7 +120,7 @@ static ssize_t show_in_use(struct class_device *cd, char *buf)
 	return sprintf(buf, "%d\n", in_use);
 }
 
-static struct class_device_attribute dma_class_attrs[] = {
+static struct device_attribute dma_attrs[] = {
 	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
 	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
 	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
@@ -128,16 +129,16 @@ static struct class_device_attribute dma_class_attrs[] = {
 
 static void dma_async_device_cleanup(struct kref *kref);
 
-static void dma_class_dev_release(struct class_device *cd)
+static void dma_dev_release(struct device *dev)
 {
-	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	struct dma_chan *chan = to_dma_chan(dev);
 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
 }
 
 static struct class dma_devclass = {
-	.name            = "dma",
-	.class_dev_attrs = dma_class_attrs,
-	.release = dma_class_dev_release,
+	.name		= "dma",
+	.dev_attrs	= dma_attrs,
+	.dev_release	= dma_dev_release,
 };
 
 /* --- client and device registration --- */
@@ -356,12 +357,11 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_zero_sum);
 	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 		!device->device_prep_dma_memset);
-	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
+	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 		!device->device_prep_dma_interrupt);
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
-	BUG_ON(!device->device_dependency_added);
 	BUG_ON(!device->device_is_tx_complete);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
@@ -377,12 +377,12 @@ int dma_async_device_register(struct dma_device *device)
 			continue;
 
 		chan->chan_id = chancnt++;
-		chan->class_dev.class = &dma_devclass;
-		chan->class_dev.dev = NULL;
-		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
+		chan->dev.class = &dma_devclass;
+		chan->dev.parent = NULL;
+		snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
 			 device->dev_id, chan->chan_id);
 
-		rc = class_device_register(&chan->class_dev);
+		rc = device_register(&chan->dev);
 		if (rc) {
 			chancnt--;
 			free_percpu(chan->local);
@@ -411,7 +411,7 @@ err_out:
 		if (chan->local == NULL)
 			continue;
 		kref_put(&device->refcount, dma_async_device_cleanup);
-		class_device_unregister(&chan->class_dev);
+		device_unregister(&chan->dev);
 		chancnt--;
 		free_percpu(chan->local);
 	}
@@ -445,7 +445,7 @@ void dma_async_device_unregister(struct dma_device *device)
 
 	list_for_each_entry(chan, &device->channels, device_node) {
 		dma_clients_notify_removed(chan);
-		class_device_unregister(&chan->class_dev);
+		device_unregister(&chan->dev);
 		dma_chan_release(chan);
 	}
 
@@ -472,20 +472,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -516,20 +518,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -562,20 +566,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
+
+	if (!tx) {
+		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -592,8 +599,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
-	INIT_LIST_HEAD(&tx->depend_node);
-	INIT_LIST_HEAD(&tx->depend_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
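Note on the prep interface change visible in the three memcpy helpers above: the caller now DMA-maps source and destination itself and passes the bus addresses directly to device_prep_dma_memcpy(), requesting an already-acked descriptor with DMA_CTRL_ACK instead of setting tx->ack and calling tx_set_src()/tx_set_dest() afterwards. Below is a minimal caller-side sketch of that convention, distilled from the hunks in this diff; the helper name example_async_copy is illustrative only, and the per-CPU byte/operation accounting done by the real helpers is omitted.

/* Sketch only -- assumes <linux/dmaengine.h> and <linux/dma-mapping.h>. */
static dma_cookie_t example_async_copy(struct dma_chan *chan,
				       void *dest, void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dest;

	/* The caller maps both buffers before requesting a descriptor. */
	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

	/* DMA_CTRL_ACK replaces the old tx->ack = 1 assignment. */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);
	if (!tx) {
		/* On failure the caller also undoes its own mappings. */
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;		/* no completion callback in this sketch */
	return tx->tx_submit(tx);	/* cookie used to poll for completion */
}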