]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
scsi: cxgb4i: libcxgbi: in error case RST tcp conn
[karo-tx-linux.git] / drivers / staging / media / atomisp / pci / atomisp2 / hmm / hmm_reserved_pool.c
1 /*
2  * Support for Medifield PNW Camera Imaging ISP subsystem.
3  *
4  * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
5  *
6  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301, USA.
21  *
22  */
23 /*
24  * This file contains functions for reserved memory pool management
25  */
26 #include <linux/kernel.h>
27 #include <linux/types.h>
28 #include <linux/mm.h>
29
30 #include "asm/cacheflush.h"
31 #include "atomisp_internal.h"
32 #include "hmm/hmm_pool.h"
33
34 /*
35  * reserved memory pool ops.
36  */
37 static unsigned int get_pages_from_reserved_pool(void *pool,
38                                         struct hmm_page_object *page_obj,
39                                         unsigned int size, bool cached)
40 {
41         unsigned long flags;
42         unsigned int i = 0;
43         unsigned int repool_pgnr;
44         int j;
45         struct hmm_reserved_pool_info *repool_info = pool;
46
47         if (!repool_info)
48                 return 0;
49
50         spin_lock_irqsave(&repool_info->list_lock, flags);
51         if (repool_info->initialized) {
52                 repool_pgnr = repool_info->index;
53
54                 for (j = repool_pgnr-1; j >= 0; j--) {
55                         page_obj[i].page = repool_info->pages[j];
56                         page_obj[i].type = HMM_PAGE_TYPE_RESERVED;
57                         i++;
58                         repool_info->index--;
59                         if (i == size)
60                                 break;
61                 }
62         }
63         spin_unlock_irqrestore(&repool_info->list_lock, flags);
64         return i;
65 }
66
67 static void free_pages_to_reserved_pool(void *pool,
68                                         struct hmm_page_object *page_obj)
69 {
70         unsigned long flags;
71         struct hmm_reserved_pool_info *repool_info = pool;
72
73         if (!repool_info)
74                 return;
75
76         spin_lock_irqsave(&repool_info->list_lock, flags);
77
78         if (repool_info->initialized &&
79             repool_info->index < repool_info->pgnr &&
80             page_obj->type == HMM_PAGE_TYPE_RESERVED) {
81                 repool_info->pages[repool_info->index++] = page_obj->page;
82         }
83
84         spin_unlock_irqrestore(&repool_info->list_lock, flags);
85 }
86
87 static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
88                                         unsigned int pool_size)
89 {
90         struct hmm_reserved_pool_info *pool_info;
91
92         pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info),
93                                 GFP_KERNEL);
94         if (unlikely(!pool_info)) {
95                 dev_err(atomisp_dev, "out of memory for repool_info.\n");
96                 return -ENOMEM;
97         }
98
99         pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,
100                         GFP_KERNEL);
101         if (unlikely(!pool_info->pages)) {
102                 dev_err(atomisp_dev, "out of memory for repool_info->pages.\n");
103                 kfree(pool_info);
104                 return -ENOMEM;
105         }
106
107         pool_info->index = 0;
108         pool_info->pgnr = 0;
109         spin_lock_init(&pool_info->list_lock);
110         pool_info->initialized = true;
111
112         *repool_info = pool_info;
113
114         return 0;
115 }
116
117 static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)
118 {
119         int ret;
120         unsigned int blk_pgnr;
121         unsigned int pgnr = pool_size;
122         unsigned int order = 0;
123         unsigned int i = 0;
124         int fail_number = 0;
125         struct page *pages;
126         int j;
127         struct hmm_reserved_pool_info *repool_info;
128         if (pool_size == 0)
129                 return 0;
130
131         ret = hmm_reserved_pool_setup(&repool_info, pool_size);
132         if (ret) {
133                 dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n");
134                 return ret;
135         }
136
137         pgnr = pool_size;
138
139         i = 0;
140         order = MAX_ORDER;
141
142         while (pgnr) {
143                 blk_pgnr = 1U << order;
144                 while (blk_pgnr > pgnr) {
145                         order--;
146                         blk_pgnr >>= 1U;
147                 }
148                 BUG_ON(order > MAX_ORDER);
149
150                 pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
151                 if (unlikely(!pages)) {
152                         if (order == 0) {
153                                 fail_number++;
154                                 dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n",
155                                                 __func__, fail_number);
156                                 /* if fail five times, will goto end */
157
158                                 /* FIXME: whether is the mechanism is ok? */
159                                 if (fail_number == ALLOC_PAGE_FAIL_NUM)
160                                         goto end;
161                         } else {
162                                 order--;
163                         }
164                 } else {
165                         blk_pgnr = 1U << order;
166
167                         ret = set_pages_uc(pages, blk_pgnr);
168                         if (ret) {
169                                 dev_err(atomisp_dev,
170                                                 "set pages uncached failed\n");
171                                 __free_pages(pages, order);
172                                 goto end;
173                         }
174
175                         for (j = 0; j < blk_pgnr; j++)
176                                 repool_info->pages[i++] = pages + j;
177
178                         repool_info->index += blk_pgnr;
179                         repool_info->pgnr += blk_pgnr;
180
181                         pgnr -= blk_pgnr;
182
183                         fail_number = 0;
184                 }
185         }
186
187 end:
188         repool_info->initialized = true;
189
190         *pool = repool_info;
191
192         dev_info(atomisp_dev,
193                         "hmm_reserved_pool init successfully,"
194                         "hmm_reserved_pool is with %d pages.\n",
195                         repool_info->pgnr);
196         return 0;
197 }
198
199 static void hmm_reserved_pool_exit(void **pool)
200 {
201         unsigned long flags;
202         int i, ret;
203         unsigned int pgnr;
204         struct hmm_reserved_pool_info *repool_info = *pool;
205
206         if (!repool_info)
207                 return;
208
209         spin_lock_irqsave(&repool_info->list_lock, flags);
210         if (!repool_info->initialized) {
211                 spin_unlock_irqrestore(&repool_info->list_lock, flags);
212                 return;
213         }
214         pgnr = repool_info->pgnr;
215         repool_info->index = 0;
216         repool_info->pgnr = 0;
217         repool_info->initialized = false;
218         spin_unlock_irqrestore(&repool_info->list_lock, flags);
219
220         for (i = 0; i < pgnr; i++) {
221                 ret = set_pages_wb(repool_info->pages[i], 1);
222                 if (ret)
223                         dev_err(atomisp_dev,
224                                 "set page to WB err...ret=%d\n", ret);
225                 /*
226                 W/A: set_pages_wb seldom return value = -EFAULT
227                 indicate that address of page is not in valid
228                 range(0xffff880000000000~0xffffc7ffffffffff)
229                 then, _free_pages would panic; Do not know why
230                 page address be valid, it maybe memory corruption by lowmemory
231                 */
232                 if (!ret)
233                         __free_pages(repool_info->pages[i], 0);
234         }
235
236         kfree(repool_info->pages);
237         kfree(repool_info);
238
239         *pool = NULL;
240 }
241
242 static int hmm_reserved_pool_inited(void *pool)
243 {
244         struct hmm_reserved_pool_info *repool_info = pool;
245
246         if (!repool_info)
247                 return 0;
248
249         return repool_info->initialized;
250 }
251
/*
 * Reserved-pool implementation of the generic hmm_pool_ops interface
 * (see hmm/hmm_pool.h).  The pool pointer passed to each op is a
 * struct hmm_reserved_pool_info *.
 */
struct hmm_pool_ops reserved_pops = {
	.pool_init		= hmm_reserved_pool_init,
	.pool_exit		= hmm_reserved_pool_exit,
	.pool_alloc_pages	= get_pages_from_reserved_pool,
	.pool_free_pages	= free_pages_to_reserved_pool,
	.pool_inited		= hmm_reserved_pool_inited,
};