/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Copyright (C) IBM Corp. 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <xen/xencomm.h>
#include <xen/interface/xen.h>
#include <asm/xen/xencomm.h>    /* for xencomm_is_phys_contiguous() */

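/*
 * Fill in @desc with the physical addresses backing @buffer, one entry per
 * (possibly partial) page, until @bytes have been covered.  Unused slots are
 * marked XENCOMM_INVALID and the descriptor is stamped with XENCOMM_MAGIC.
 * Returns -EINVAL if a virtual address cannot be translated and -ENOSPC if
 * the descriptor has too few address slots.
 */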
static int xencomm_init(struct xencomm_desc *desc,
                        void *buffer, unsigned long bytes)
{
        unsigned long recorded = 0;
        int i = 0;

        while ((recorded < bytes) && (i < desc->nr_addrs)) {
                unsigned long vaddr = (unsigned long)buffer + recorded;
                unsigned long paddr;
                int offset;
                int chunksz;

                offset = vaddr % PAGE_SIZE; /* handle partial pages */
                chunksz = min(PAGE_SIZE - offset, bytes - recorded);

                paddr = xencomm_vtop(vaddr);
                if (paddr == ~0UL) {
                        printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
                               __func__, vaddr);
                        return -EINVAL;
                }

                desc->address[i++] = paddr;
                recorded += chunksz;
        }

        if (recorded < bytes) {
                printk(KERN_DEBUG
                       "%s: could only translate %ld of %ld bytes\n",
                       __func__, recorded, bytes);
                return -ENOSPC;
        }

        /* mark remaining addresses invalid (just for safety) */
        while (i < desc->nr_addrs)
                desc->address[i++] = XENCOMM_INVALID;

        desc->magic = XENCOMM_MAGIC;

        return 0;
}

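/*
 * Allocate a descriptor with enough address slots to describe @bytes
 * starting at @buffer.  Depending on the size of struct xencomm_desc the
 * storage comes either from whole pages or from the slab allocator (see the
 * comment below); nr_addrs is set to the number of usable slots.
 */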
static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
                                          void *buffer, unsigned long bytes)
{
        struct xencomm_desc *desc;
        unsigned long buffer_ulong = (unsigned long)buffer;
        unsigned long start = buffer_ulong & PAGE_MASK;
        unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
        unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
        unsigned long size = sizeof(*desc) +
                sizeof(desc->address[0]) * nr_addrs;

        /*
         * The slab allocator only guarantees sizeof(void *) alignment.
         * When sizeof(*desc) > sizeof(void *), struct xencomm_desc might
         * cross a page boundary, so allocate whole pages instead.
         */
        if (sizeof(*desc) > sizeof(void *)) {
                unsigned long order = get_order(size);
                desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
                                                               order);
                if (desc == NULL)
                        return NULL;

                desc->nr_addrs =
                        ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
                        sizeof(*desc->address);
        } else {
                desc = kmalloc(size, gfp_mask);
                if (desc == NULL)
                        return NULL;

                desc->nr_addrs = nr_addrs;
        }
        return desc;
}

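/*
 * Release a descriptor previously returned as a xencomm handle.  Inline
 * handles (XENCOMM_INLINE_FLAG set) carry no allocation and are ignored;
 * otherwise the allocation strategy of xencomm_alloc() is mirrored:
 * free_pages() for page-backed descriptors, kfree() for slab-backed ones.
 */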
void xencomm_free(struct xencomm_handle *desc)
{
        if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
                struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
                if (sizeof(*desc__) > sizeof(void *)) {
                        unsigned long size = sizeof(*desc__) +
                                sizeof(desc__->address[0]) * desc__->nr_addrs;
                        unsigned long order = get_order(size);
                        free_pages((unsigned long)__va(desc), order);
                } else
                        kfree(__va(desc));
        }
}

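/*
 * Build and initialize a descriptor covering @buffer/@bytes and return it
 * through @ret.  A zero-length buffer yields a NULL descriptor, which Xen
 * recognizes as "no argument".  On error a negative errno is returned and
 * any partially built descriptor is freed.
 */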
static int xencomm_create(void *buffer, unsigned long bytes,
                          struct xencomm_desc **ret, gfp_t gfp_mask)
{
        struct xencomm_desc *desc;
        int rc;

        pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);

        if (bytes == 0) {
                /* don't create a descriptor; Xen recognizes NULL. */
                BUG_ON(buffer != NULL);
                *ret = NULL;
                return 0;
        }

        BUG_ON(buffer == NULL); /* 'bytes' is non-zero */

        desc = xencomm_alloc(gfp_mask, buffer, bytes);
        if (!desc) {
                printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
                return -ENOMEM;
        }

        rc = xencomm_init(desc, buffer, bytes);
        if (rc) {
                printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
                xencomm_free((struct xencomm_handle *)__pa(desc));
                return rc;
        }

        *ret = desc;
        return 0;
}

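/*
 * Physically contiguous buffers need no descriptor: the handle is simply
 * the buffer's physical address with XENCOMM_INLINE_FLAG set, which lets
 * consumers (and xencomm_free()) distinguish it from a descriptor handle.
 */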
static struct xencomm_handle *xencomm_create_inline(void *ptr)
{
        unsigned long paddr;

        BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));

        paddr = (unsigned long)xencomm_pa(ptr);
        BUG_ON(paddr & XENCOMM_INLINE_FLAG);
        return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
}

/* "mini" routine, for stack-based communications: */
static int xencomm_create_mini(void *buffer,
        unsigned long bytes, struct xencomm_mini *xc_desc,
        struct xencomm_desc **ret)
{
        int rc = 0;
        struct xencomm_desc *desc;
        BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);

        desc = (void *)xc_desc;

        desc->nr_addrs = XENCOMM_MINI_ADDRS;

        rc = xencomm_init(desc, buffer, bytes);
        if (!rc)
                *ret = desc;

        return rc;
}

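/*
 * Map an arbitrary kernel buffer for use as a hypercall argument.
 * Physically contiguous buffers become inline handles; anything else gets
 * a freshly allocated descriptor (GFP_KERNEL, so this may sleep).  Returns
 * NULL on failure.
 *
 * Illustrative use only (HYPERVISOR_some_op stands for a hypothetical
 * xencomm-aware hypercall, not defined in this file):
 *
 *        struct xencomm_handle *h = xencomm_map(buf, len);
 *        if (h == NULL)
 *                return -ENOMEM;
 *        rc = HYPERVISOR_some_op(h);
 *        xencomm_free(h);
 */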
struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
{
        int rc;
        struct xencomm_desc *desc;

        if (xencomm_is_phys_contiguous((unsigned long)ptr))
                return xencomm_create_inline(ptr);

        rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);

        if (rc || desc == NULL)
                return NULL;

        return xencomm_pa(desc);
}

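/*
 * Like xencomm_map(), but performs no allocation: the caller provides a
 * struct xencomm_mini (typically on the stack, per the "mini" comment
 * above) that is reused as a fixed-size descriptor with XENCOMM_MINI_ADDRS
 * address slots.  Returns NULL on failure, e.g. when the buffer would need
 * more entries than that.
 */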
struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
                        struct xencomm_mini *xc_desc)
{
        int rc;
        struct xencomm_desc *desc = NULL;

        if (xencomm_is_phys_contiguous((unsigned long)ptr))
                return xencomm_create_inline(ptr);

        rc = xencomm_create_mini(ptr, bytes, xc_desc,
                                &desc);

        if (rc)
                return NULL;

        return xencomm_pa(desc);
}