4 * Copyright(c) 2015, 2016 Intel Corporation.
6 * This file is provided under a dual BSD/GPLv2 license. When using or
7 * redistributing this file, you may do so under either license.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions
26 * - Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * - Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in
30 * the documentation and/or other materials provided with the
32 * - Neither the name of Intel Corporation nor the names of its
33 * contributors may be used to endorse or promote products derived
34 * from this software without specific prior written permission.
36 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
37 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
38 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
39 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
40 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
41 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
42 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
43 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
44 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
45 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
46 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
50 #include <linux/list.h>
51 #include <linux/workqueue.h>
52 #include <linux/sched.h>
54 #include "sdma_txreq.h"
/**
 * typedef (*restart_t)() - restart callback
 * @work: pointer to work structure
 *
 * Work-item callback type used to restart progress once a resource
 * shortage has cleared (queued via the iowait's embedded work_struct).
 */
typedef void (*restart_t)(struct work_struct *work);
/**
 * struct iowait - linkage for delayed progress/waiting
 * @list: used to add/insert into QP/PQ wait lists
 * @lock: used to record the list head lock
 * @tx_head: overflow list of sdma_txreq's
 * @sleep: no space callback
 * @wakeup: space callback wakeup
 * @sdma_drained: sdma count drained
 * @iowork: workqueue overhead
 * @wait_dma: wait for sdma_busy == 0
 * @wait_pio: wait for pio_busy == 0
 * @sdma_busy: # of packets in flight
 * @count: total number of descriptors in tx_head'ed list
 * @tx_limit: limit for overflow queuing
 * @tx_count: number of tx entry's in tx_head'ed list
 *
 * This is to be embedded in user's state structure
 * (QP or PQ).
 *
 * The sleep and wakeup members are a
 * bit misnamed.  They do not strictly
 * speaking sleep or wake up, but they
 * are callbacks for the ULP to implement
 * whatever queuing/dequeuing of
 * the embedded iowait and its containing struct
 * when a resource shortage like SDMA ring space is seen.
 *
 * Both potentially have locks held,
 * so sleeping is not allowed.
 *
 * The wait_dma member is the wait queue used by
 * iowait_sdma_drain() to wait for sdma_busy to reach zero.
 *
 * The lock field is used by waiters to record
 * the seqlock_t that guards the list head.
 * Waiters explicitly know that, but the destroy
 * code that unwaits QPs does not.
 */
	struct list_head list;
	struct list_head tx_head;
	/*
	 * NOTE(review): the declaration of the sleep() callback (and the
	 * lock, sdma_busy, pio_busy, count, tx_limit and tx_count fields)
	 * is not visible in this chunk; the two parameter lines below are
	 * fragments of the sleep() callback's parameter list.
	 */
	struct sdma_engine *sde,
	struct sdma_txreq *tx,
	void (*wakeup)(struct iowait *wait, int reason);
	void (*sdma_drained)(struct iowait *wait);
	struct work_struct iowork;
	wait_queue_head_t wait_dma;
	wait_queue_head_t wait_pio;
/*
 * Reason code passed to the wakeup() callback; presumably indicates
 * SDMA resources became available — TODO confirm against callers.
 */
#define SDMA_AVAIL_REASON 0
/**
 * iowait_init() - initialize wait structure
 * @wait: wait struct to initialize
 * @tx_limit: limit for overflow queuing
 * @func: restart function for workqueue
 * @sleep: sleep function for no space
 * @wakeup: wakeup function for no space
 * @sdma_drained: callback stored for iowait_drain_wakeup() to invoke
 *
 * This function initializes the iowait
 * structure embedded in the QP or PQ.
 */
static inline void iowait_init(
	void (*func)(struct work_struct *work),
	/*
	 * NOTE(review): the wait, tx_limit and sleep parameter lines are
	 * not visible in this chunk; the two lines below are fragments of
	 * the sleep() callback's parameter list.
	 */
	struct sdma_engine *sde,
	struct sdma_txreq *tx,
	void (*wakeup)(struct iowait *wait, int reason),
	void (*sdma_drained)(struct iowait *wait))
	/* Start with empty wait and overflow lists. */
	INIT_LIST_HEAD(&wait->list);
	INIT_LIST_HEAD(&wait->tx_head);
	INIT_WORK(&wait->iowork, func);
	init_waitqueue_head(&wait->wait_dma);
	init_waitqueue_head(&wait->wait_pio);
	/* No sdma or pio packets in flight yet. */
	atomic_set(&wait->sdma_busy, 0);
	atomic_set(&wait->pio_busy, 0);
	wait->tx_limit = tx_limit;
	wait->wakeup = wakeup;
	wait->sdma_drained = sdma_drained;
/**
 * iowait_schedule() - schedule the iowait's restart work
 * @wait: wait struct to schedule
 * @wq: workqueue for schedule
 * @cpu: cpu on which to queue the work (parameter line not visible
 *       in this chunk)
 *
 * Queues the iowait's work item on @wq, bound to @cpu.
 * (The previous kernel-doc said "initialize wait structure", which was
 * a copy-paste from iowait_init().)
 */
static inline void iowait_schedule(
	struct workqueue_struct *wq,
	queue_work_on(cpu, wq, &wait->iowork);
/**
 * iowait_sdma_drain() - wait for DMAs to drain
 * @wait: iowait structure
 *
 * This will delay until the iowait sdmas have
 * completed, i.e. sdma_busy has reached zero.
 */
static inline void iowait_sdma_drain(struct iowait *wait)
	wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy));
/**
 * iowait_sdma_pending() - return sdma pending count
 * @wait: iowait structure
 *
 * Return: current value of sdma_busy (# of packets in flight).
 */
static inline int iowait_sdma_pending(struct iowait *wait)
	return atomic_read(&wait->sdma_busy);
/**
 * iowait_sdma_inc - note sdma io pending
 * @wait: iowait structure
 *
 * Atomically bumps sdma_busy by one.
 */
static inline void iowait_sdma_inc(struct iowait *wait)
	atomic_inc(&wait->sdma_busy);
/**
 * iowait_sdma_add - add count to pending
 * @wait: iowait structure
 * @count: amount to add to sdma_busy
 */
static inline void iowait_sdma_add(struct iowait *wait, int count)
	atomic_add(count, &wait->sdma_busy);
/**
 * iowait_sdma_dec - note sdma complete
 * @wait: iowait structure
 *
 * Return: nonzero when this decrement brings sdma_busy to zero.
 */
static inline int iowait_sdma_dec(struct iowait *wait)
	return atomic_dec_and_test(&wait->sdma_busy);
/**
 * iowait_pio_drain() - wait for pios to drain
 * @wait: iowait structure
 *
 * This will delay until the iowait pios have
 * drained (pio_busy reaches zero), bounded by a timeout — the timeout
 * argument line is not visible in this chunk.
 */
static inline void iowait_pio_drain(struct iowait *wait)
	wait_event_timeout(wait->wait_pio,
			   !atomic_read(&wait->pio_busy),
/**
 * iowait_pio_pending() - return pio pending count
 * @wait: iowait structure
 *
 * Return: current value of pio_busy.
 */
static inline int iowait_pio_pending(struct iowait *wait)
	return atomic_read(&wait->pio_busy);
/**
 * iowait_pio_inc - note pio pending
 * @wait: iowait structure
 *
 * Atomically bumps pio_busy by one.
 */
static inline void iowait_pio_inc(struct iowait *wait)
	atomic_inc(&wait->pio_busy);
/**
 * iowait_pio_dec - note pio complete
 * @wait: iowait structure
 *
 * Return: nonzero when this decrement brings pio_busy to zero.
 * (The kernel-doc previously misnamed this function iowait_sdma_dec.)
 */
static inline int iowait_pio_dec(struct iowait *wait)
	return atomic_dec_and_test(&wait->pio_busy);
/**
 * iowait_drain_wakeup() - trigger iowait_drain() waiter
 * @wait: iowait structure
 *
 * This will trigger any waiters on both the sdma and pio wait queues,
 * then invoke the optional sdma_drained callback if one was set.
 */
static inline void iowait_drain_wakeup(struct iowait *wait)
	wake_up(&wait->wait_dma);
	wake_up(&wait->wait_pio);
	if (wait->sdma_drained)
		wait->sdma_drained(wait);
290 * iowait_get_txhead() - get packet off of iowait list
292 * @wait wait struture
294 static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
296 struct sdma_txreq *tx = NULL;
298 if (!list_empty(&wait->tx_head)) {
299 tx = list_first_entry(
303 list_del_init(&tx->list);