/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"
/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
};
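/*
 * Unmap the chunk's scatterlist if it was DMA-mapped, then free each
 * of the higher-order page allocations it holds.
 */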
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}
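/*
 * Free an ICM area: walk the chunk list, release each chunk with the
 * helper matching how it was allocated, then free the bookkeeping
 * structures themselves.
 */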
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}
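/*
 * Allocate an ICM area of npages pages as a list of chunks.  Each
 * allocation is attempted at the largest order that still fits in the
 * remaining page count, falling back to smaller orders on failure;
 * non-coherent chunks are DMA-mapped once they fill up (or at the end).
 */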
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask);

		if (ret) {
			/* Retry with a smaller order */
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}
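/*
 * Thin wrappers around the MAP_ICM/UNMAP_ICM firmware commands, which
 * map (or unmap) host memory into the device's ICM virtual space.
 */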
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B);
}
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be64 *inbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	inbox[0] = cpu_to_be64(virt);
	inbox[1] = cpu_to_be64(dma_addr);

	err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev, mailbox);

	if (!err)
		mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
			 (unsigned long long) dma_addr, (unsigned long long) virt);

	return err;
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
}
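/*
 * Take a reference on the ICM chunk backing table object 'obj',
 * allocating and mapping the chunk on first use.
 */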
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}
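/*
 * Drop a reference on the chunk backing 'obj'; unmap and free the
 * chunk when the last reference goes away.
 */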
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	int i;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}
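/*
 * Return the lowmem virtual address of table object 'obj' (and, if
 * dma_handle is non-NULL, its bus address).  Only valid for lowmem
 * tables, since the result is dereferenced through the kernel mapping.
 */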
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}

			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}

			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}
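/*
 * Take references on every chunk covering objects [start, end],
 * rolling back already-taken references on failure.
 */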
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 int start, int end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  int start, int end)
{
	int i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}
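/*
 * Set up an ICM table: size the chunk array, record the table
 * geometry, and pre-allocate/map the chunks holding the 'reserved'
 * firmware-owned objects so that they are never freed.
 */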
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, int nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	return -ENOMEM;
}
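/*
 * Tear down a table set up by mlx4_init_icm_table: unmap and free
 * every remaining chunk, then free the chunk array.
 */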
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table->icm);
}