/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <asm/byteorder.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>

#include "gdm_wimax.h"
#include "hci.h"
#include "gdm_qos.h"

#define B2H(x)		__be16_to_cpu(x)

#define MAX_FREE_LIST_CNT	32

/* Cache of recently freed qos_entry_s objects, protected by its own lock. */
static struct {
	struct list_head head;
	int cnt;
	spinlock_t lock;
} qos_free_list;

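/* Reset the free-entry cache to empty and initialize its lock. */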
static void init_qos_entry_list(void)
{
	qos_free_list.cnt = 0;
	INIT_LIST_HEAD(&qos_free_list.head);
	spin_lock_init(&qos_free_list.lock);
}

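/*
 * Pop a cached entry from qos_free_list if one is available; otherwise
 * fall back to kmalloc(GFP_ATOMIC), which may return NULL.
 */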
static void *alloc_qos_entry(void)
{
	struct qos_entry_s *entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt) {
		entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
				   list);
		list_del(&entry->list);
		qos_free_list.cnt--;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return entry;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	return kmalloc(sizeof(*entry), GFP_ATOMIC);
}

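/*
 * Recycle an entry into qos_free_list, or kfree() it once the cache
 * already holds MAX_FREE_LIST_CNT entries.
 */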
63 static void free_qos_entry(void *entry)
65 struct qos_entry_s *qentry = (struct qos_entry_s *) entry;
68 spin_lock_irqsave(&qos_free_list.lock, flags);
69 if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
70 list_add(&qentry->list, &qos_free_list.head);
72 spin_unlock_irqrestore(&qos_free_list.lock, flags);
75 spin_unlock_irqrestore(&qos_free_list.lock, flags);
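/* Free every entry on @free_list directly, bypassing the cache. */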
static void free_qos_entry_list(struct list_head *free_list)
{
	struct qos_entry_s *entry, *n;
	int total_free = 0;

	list_for_each_entry_safe(entry, n, free_list, list) {
		list_del(&entry->list);
		kfree(entry);
		total_free++;
	}

	pr_debug("%s: total_free_cnt=%d\n", __func__, total_free);
}

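/* Initialize per-NIC QoS state: per-flow lists, counters, and locks. */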
void gdm_qos_init(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	int i;

	for (i = 0; i < QOS_MAX; i++) {
		INIT_LIST_HEAD(&qcb->qos_list[i]);
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = 0;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;
	qcb->qos_limit_size = 255;

	spin_lock_init(&qcb->qos_lock);

	init_qos_entry_list();
}

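/*
 * Disable all service flows and move their queued entries to a private
 * list under qos_lock, then free them after the lock is dropped.
 */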
115 void gdm_qos_release_list(void *nic_ptr)
117 struct nic *nic = nic_ptr;
118 struct qos_cb_s *qcb = &nic->qos;
120 struct qos_entry_s *entry, *n;
121 struct list_head free_list;
124 INIT_LIST_HEAD(&free_list);
126 spin_lock_irqsave(&qcb->qos_lock, flags);
128 for (i = 0; i < QOS_MAX; i++) {
129 qcb->csr[i].qos_buf_count = 0;
130 qcb->csr[i].enabled = 0;
133 qcb->qos_list_cnt = 0;
134 qcb->qos_null_idx = QOS_MAX-1;
136 for (i = 0; i < QOS_MAX; i++) {
137 list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) {
138 list_move_tail(&entry->list, &free_list);
141 spin_unlock_irqrestore(&qcb->qos_lock, flags);
142 free_qos_entry_list(&free_list);
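/*
 * Apply one classifier to an IPv4 packet. @Stream points at the IPv4
 * header (so Stream[1] is TOS, Stream[9] the protocol, Stream[12] and
 * Stream[16] the source and destination addresses) and @port at the
 * TCP/UDP port fields. Returns 0 on match, 1 on the first miss.
 */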
static u32 chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *Stream, u8 *port)
{
	int i;

	if (csr->classifier_rule_en & IPTYPEOFSERVICE) {
		if (((Stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
		    ((Stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOL) {
		if (Stream[9] != csr->protocol)
			return 1;
	}

	if (csr->classifier_rule_en & IPMASKEDSRCADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((Stream[12 + i] & csr->ipsrc_addrmask[i]) !=
			    (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & IPMASKEDDSTADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((Stream[16 + i] & csr->ipdst_addrmask[i]) !=
			    (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & PROTOCOLSRCPORTRANGE) {
		i = ((port[0] << 8) & 0xff00) + port[1];
		if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOLDSTPORTRANGE) {
		i = ((port[2] << 8) & 0xff00) + port[3];
		if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
			return 1;
	}

	return 0;
}

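/*
 * Return the index of the first enabled service flow whose classifier
 * matches this IPv4 packet, or -1 if none does.
 */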
static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
{
	u32 IP_Ver, Header_Len;
	int i;
	struct qos_cb_s *qcb = &nic->qos;

	if (iph == NULL || tcpudph == NULL)
		return -1;

	IP_Ver = (iph[0] >> 4) & 0xf;
	Header_Len = iph[0] & 0xf;

	if (IP_Ver == 4) {
		for (i = 0; i < QOS_MAX; i++) {
			if (qcb->csr[i].enabled &&
			    qcb->csr[i].classifier_rule_en &&
			    chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
				return i;
		}
	}

	return -1;
}

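/*
 * Move at most one pending entry per service flow onto @head for
 * transmission, honoring the per-flow qos_limit_size budget. Both
 * callers hold qcb->qos_lock.
 */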
static u32 extract_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry;
	int i;

	INIT_LIST_HEAD(head);

	for (i = 0; i < QOS_MAX; i++) {
		if (!qcb->csr[i].enabled ||
		    qcb->csr[i].qos_buf_count >= qcb->qos_limit_size ||
		    list_empty(&qcb->qos_list[i]))
			continue;

		entry = list_entry(qcb->qos_list[i].prev,
				   struct qos_entry_s, list);
		list_move_tail(&entry->list, head);
		qcb->csr[i].qos_buf_count++;

		if (!list_empty(&qcb->qos_list[i]))
			netdev_warn(nic->netdev, "Index(%d) is piled!!\n", i);
	}

	return 0;
}

static void send_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_entry_s *entry, *n;

	list_for_each_entry_safe(entry, n, head, list) {
		list_del(&entry->list);
		/* Transmit before freeing: the entry owns skb and dev. */
		gdm_wimax_send_tx(entry->skb, entry->dev);
		free_qos_entry(entry);
	}
}

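/*
 * Transmit path: when QoS service flows exist, classify the outgoing
 * IPv4 packet, queue it on the matching flow, and drain whatever the
 * per-flow budget allows; otherwise hand the skb straight to
 * gdm_wimax_send_tx().
 */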
int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
{
	struct nic *nic = netdev_priv(dev);
	int ret = 0;
	struct qos_cb_s *qcb = &nic->qos;
	unsigned long flags;
	struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
	struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
	struct tcphdr *tcph;
	struct qos_entry_s *entry = NULL;
	struct list_head send_list;
	int index = -1;

	/* iph->ihl counts 32-bit words; advance in bytes, not tcphdr units. */
	tcph = (struct tcphdr *)((char *)iph + iph->ihl * 4);

	if (B2H(ethh->h_proto) == ETH_P_IP) {
		if (qcb->qos_list_cnt && !qos_free_list.cnt) {
			entry = alloc_qos_entry();
			entry->skb = skb;
			entry->dev = dev;
			netdev_dbg(dev, "qcb->qos_list_cnt=%d\n",
				   qcb->qos_list_cnt);
		}

		spin_lock_irqsave(&qcb->qos_lock, flags);
		if (qcb->qos_list_cnt) {
			index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
			if (index == -1)
				index = qcb->qos_null_idx;

			if (!entry) {
				entry = alloc_qos_entry();
				entry->skb = skb;
				entry->dev = dev;
			}

			list_add_tail(&entry->list, &qcb->qos_list[index]);
			extract_qos_list(nic, &send_list);
			spin_unlock_irqrestore(&qcb->qos_lock, flags);
			send_qos_list(nic, &send_list);
			goto out;
		}
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		if (entry)
			free_qos_entry(entry);
	}

	ret = gdm_wimax_send_tx(skb, dev);
out:
	return ret;
}

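/*
 * Find the csr slot tracking @SFID. If @mode is non-zero and the SFID
 * is unknown, claim the first disabled slot for it. Returns -1 when no
 * slot is found.
 */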
static int get_csr(struct qos_cb_s *qcb, u32 SFID, int mode)
{
	int i;

	for (i = 0; i < qcb->qos_list_cnt; i++) {
		if (qcb->csr[i].SFID == SFID)
			return i;
	}

	if (mode) {
		for (i = 0; i < QOS_MAX; i++) {
			if (qcb->csr[i].enabled == 0) {
				qcb->csr[i].enabled = 1;
				qcb->qos_list_cnt++;
				return i;
			}
		}
	}
	return -1;
}

#define QOS_CHANGE_DEL	0xFC
#define QOS_ADD		0xFD
#define QOS_REPORT	0xFE

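/*
 * Handle a QoS HCI event from the device: a per-flow buffer report
 * (QOS_REPORT), a new service flow (QOS_ADD), or a service-flow
 * change/deletion (QOS_CHANGE_DEL).
 */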
void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
{
	struct nic *nic = nic_ptr;
	u32 i, SFID, pos;
	int index;
	u8 subCmdEvt;
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry, *n;
	struct list_head send_list;
	struct list_head free_list;
	unsigned long flags;
	subCmdEvt = buf[4];

	if (subCmdEvt == QOS_REPORT) {
		spin_lock_irqsave(&qcb->qos_lock, flags);
		for (i = 0; i < qcb->qos_list_cnt; i++) {
			SFID = ((buf[(i * 5) + 6] << 24) & 0xff000000);
			SFID += ((buf[(i * 5) + 7] << 16) & 0xff0000);
			SFID += ((buf[(i * 5) + 8] << 8) & 0xff00);
			SFID += buf[(i * 5) + 9];
			index = get_csr(qcb, SFID, 0);
			if (index == -1) {
				spin_unlock_irqrestore(&qcb->qos_lock, flags);
				netdev_err(nic->netdev, "QoS ERROR: No SF\n");
				return;
			}
			qcb->csr[index].qos_buf_count = buf[(i * 5) + 10];
		}

		extract_qos_list(nic, &send_list);
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		send_qos_list(nic, &send_list);
		return;
	}
	/* subCmdEvt == QOS_ADD || subCmdEvt == QOS_CHANGE_DEL */
	pos = 6;
	SFID = ((buf[pos++] << 24) & 0xff000000);
	SFID += ((buf[pos++] << 16) & 0xff0000);
	SFID += ((buf[pos++] << 8) & 0xff00);
	SFID += buf[pos++];

	index = get_csr(qcb, SFID, 1);
	if (index == -1) {
		netdev_err(nic->netdev,
			   "QoS ERROR: csr Update Error / Wrong index (%d)\n",
			   index);
		return;
	}
	if (subCmdEvt == QOS_ADD) {
		netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n",
			   SFID, index);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].SFID = SFID;
		qcb->csr[index].classifier_rule_en = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].classifier_rule_en += buf[pos++];
		if (qcb->csr[index].classifier_rule_en == 0)
			qcb->qos_null_idx = index;
		qcb->csr[index].ip2s_mask = buf[pos++];
		qcb->csr[index].ip2s_lo = buf[pos++];
		qcb->csr[index].ip2s_hi = buf[pos++];
		qcb->csr[index].protocol = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
		qcb->csr[index].ipsrc_addr[0] = buf[pos++];
		qcb->csr[index].ipsrc_addr[1] = buf[pos++];
		qcb->csr[index].ipsrc_addr[2] = buf[pos++];
		qcb->csr[index].ipsrc_addr[3] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
		qcb->csr[index].ipdst_addr[0] = buf[pos++];
		qcb->csr[index].ipdst_addr[1] = buf[pos++];
		qcb->csr[index].ipdst_addr[2] = buf[pos++];
		qcb->csr[index].ipdst_addr[3] = buf[pos++];
		qcb->csr[index].srcport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_lo += buf[pos++];
		qcb->csr[index].srcport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_hi += buf[pos++];
		qcb->csr[index].dstport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_lo += buf[pos++];
		qcb->csr[index].dstport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_hi += buf[pos++];

		qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
	} else if (subCmdEvt == QOS_CHANGE_DEL) {
		netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
			   SFID, index);

		INIT_LIST_HEAD(&free_list);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].enabled = 0;
		qcb->qos_list_cnt--;
		/* Guard the divide: deleting the last flow leaves the count at 0. */
		if (qcb->qos_list_cnt)
			qcb->qos_limit_size = 254 / qcb->qos_list_cnt;

		list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
					 list)
			list_move_tail(&entry->list, &free_list);

		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		free_qos_entry_list(&free_list);
	}
}