1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 *****************************************************************************/
63 #ifndef __iwl_trans_h__
64 #define __iwl_trans_h__
66 #include <linux/debugfs.h>
67 #include <linux/skbuff.h>
69 #include "iwl-shared.h"
70 #include "iwl-commands.h"
/*
 * This file includes the declarations that are exported from the transport
 * layer.
 */

/* Convert an IEEE 802.11 sequence-control field to/from a 12-bit SN. */
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
85 CMD_WANT_SKB = BIT(1),
86 CMD_ON_DEMAND = BIT(2),
89 #define DEF_CMD_PAYLOAD_SIZE 320
92 * struct iwl_device_cmd
94 * For allocation of the command and tx queues, this establishes the overall
95 * size of the largest command we send to uCode, except for commands that
96 * aren't fully copied and use other TFD space.
98 struct iwl_device_cmd {
99 struct iwl_cmd_header hdr; /* uCode API */
105 struct iwl_tx_cmd tx;
106 struct iwl6000_channel_switch_cmd chswitch;
107 u8 payload[DEF_CMD_PAYLOAD_SIZE];
111 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
113 #define IWL_MAX_CMD_TFDS 2
115 enum iwl_hcmd_dataflag {
116 IWL_HCMD_DFL_NOCOPY = BIT(0),
120 * struct iwl_host_cmd - Host command to the uCode
121 * @data: array of chunks that composes the data of the host command
122 * @reply_page: pointer to the page that holds the response to the host command
124 * @flags: can be CMD_* note CMD_WANT_SKB is incompatible withe CMD_ASYNC
125 * @len: array of the lenths of the chunks in data
127 * @id: id of the host command
129 struct iwl_host_cmd {
130 const void *data[IWL_MAX_CMD_TFDS];
131 unsigned long reply_page;
132 void (*callback)(struct iwl_shared *shrd,
133 struct iwl_device_cmd *cmd,
134 struct iwl_rx_packet *pkt);
136 u16 len[IWL_MAX_CMD_TFDS];
137 u8 dataflags[IWL_MAX_CMD_TFDS];
142 * struct iwl_trans_ops - transport specific operations
143 * @alloc: allocates the meta data (not the queues themselves)
144 * @request_irq: requests IRQ - will be called before the FW load in probe flow
145 * @start_device: allocates and inits all the resources for the transport
147 * @prepare_card_hw: claim the ownership on the HW. Will be called during
149 * @tx_start: starts and configures all the Tx fifo - usually done once the fw
151 * @wake_any_queue: wake all the queues of a specfic context IWL_RXON_CTX_*
152 * @stop_device:stops the whole device (embedded CPU put to reset)
153 * @send_cmd:send a host command
154 * @send_cmd_pdu:send a host command: flags can be CMD_*
156 * @reclaim: free packet until ssn. Returns a list of freed packets.
157 * @tx_agg_alloc: allocate resources for a TX BA session
158 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
159 * ready and a successful ADDBA response has been received.
160 * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
161 * @kick_nic: remove the RESET from the embedded CPU and let it run
162 * @free: release all the ressource for the transport layer itself such as
163 * irq, tasklet etc...
164 * @stop_queue: stop a specific queue
165 * @check_stuck_queue: check if a specific queue is stuck
166 * @wait_tx_queue_empty: wait until all tx queues are empty
167 * @dbgfs_register: add the dbgfs files under this directory. Files will be
168 * automatically deleted.
169 * @suspend: stop the device unless WoWLAN is configured
170 * @resume: resume activity of the device
172 struct iwl_trans_ops {
174 struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
175 int (*request_irq)(struct iwl_trans *iwl_trans);
176 int (*start_device)(struct iwl_trans *trans);
177 int (*prepare_card_hw)(struct iwl_trans *trans);
178 void (*stop_device)(struct iwl_trans *trans);
179 void (*tx_start)(struct iwl_trans *trans);
181 void (*wake_any_queue)(struct iwl_trans *trans,
182 enum iwl_rxon_context_id ctx);
184 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
186 int (*send_cmd_pdu)(struct iwl_trans *trans, u8 id, u32 flags, u16 len,
188 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
189 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
191 void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
192 int txq_id, int ssn, u32 status,
193 struct sk_buff_head *skbs);
195 int (*tx_agg_disable)(struct iwl_trans *trans,
196 enum iwl_rxon_context_id ctx, int sta_id,
198 int (*tx_agg_alloc)(struct iwl_trans *trans,
199 enum iwl_rxon_context_id ctx, int sta_id, int tid,
201 void (*tx_agg_setup)(struct iwl_trans *trans,
202 enum iwl_rxon_context_id ctx, int sta_id, int tid,
205 void (*kick_nic)(struct iwl_trans *trans);
207 void (*free)(struct iwl_trans *trans);
209 void (*stop_queue)(struct iwl_trans *trans, int q);
211 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
212 int (*check_stuck_queue)(struct iwl_trans *trans, int q);
213 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
215 int (*suspend)(struct iwl_trans *trans);
216 int (*resume)(struct iwl_trans *trans);
220 * struct iwl_trans - transport common data
221 * @ops - pointer to iwl_trans_ops
222 * @shrd - pointer to iwl_shared which holds shared data from the upper layer
223 * @hcmd_lock: protects HCMD
226 const struct iwl_trans_ops *ops;
227 struct iwl_shared *shrd;
228 spinlock_t hcmd_lock;
230 /* pointer to trans specific struct */
231 /*Ensure that this pointer will always be aligned to sizeof pointer */
232 char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
235 static inline int iwl_trans_request_irq(struct iwl_trans *trans)
237 return trans->ops->request_irq(trans);
240 static inline int iwl_trans_start_device(struct iwl_trans *trans)
242 return trans->ops->start_device(trans);
245 static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
247 return trans->ops->prepare_card_hw(trans);
250 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
252 trans->ops->stop_device(trans);
255 static inline void iwl_trans_tx_start(struct iwl_trans *trans)
257 trans->ops->tx_start(trans);
260 static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
261 enum iwl_rxon_context_id ctx)
263 trans->ops->wake_any_queue(trans, ctx);
267 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
268 struct iwl_host_cmd *cmd)
270 return trans->ops->send_cmd(trans, cmd);
273 static inline int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
274 u32 flags, u16 len, const void *data)
276 return trans->ops->send_cmd_pdu(trans, id, flags, len, data);
279 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
280 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
283 return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
286 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
287 int tid, int txq_id, int ssn, u32 status,
288 struct sk_buff_head *skbs)
290 trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
293 static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
294 enum iwl_rxon_context_id ctx,
297 return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
300 static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
301 enum iwl_rxon_context_id ctx,
302 int sta_id, int tid, u16 *ssn)
304 return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
308 static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
309 enum iwl_rxon_context_id ctx,
313 trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit);
316 static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
318 trans->ops->kick_nic(trans);
321 static inline void iwl_trans_free(struct iwl_trans *trans)
323 trans->ops->free(trans);
326 static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
328 trans->ops->stop_queue(trans, q);
331 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
333 return trans->ops->wait_tx_queue_empty(trans);
336 static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
338 return trans->ops->check_stuck_queue(trans, q);
340 static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
343 return trans->ops->dbgfs_register(trans, dir);
346 static inline int iwl_trans_suspend(struct iwl_trans *trans)
348 return trans->ops->suspend(trans);
351 static inline int iwl_trans_resume(struct iwl_trans *trans)
353 return trans->ops->resume(trans);
356 /*****************************************************
357 * Transport layers implementations
358 ******************************************************/
359 extern const struct iwl_trans_ops trans_ops_pcie;
361 #endif /* __iwl_trans_h__ */