5 ForeRunnerHE ATM Adapter driver for ATM on Linux
6 Copyright (C) 1999-2001 Naval Research Laboratory
8 This library is free software; you can redistribute it and/or
9 modify it under the terms of the GNU Lesser General Public
10 License as published by the Free Software Foundation; either
11 version 2.1 of the License, or (at your option) any later version.
13 This library is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public
19 License along with this library; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 ForeRunnerHE ATM Adapter driver for ATM on Linux
29 Copyright (C) 1999-2001 Naval Research Laboratory
31 Permission to use, copy, modify and distribute this software and its
32 documentation is hereby granted, provided that both the copyright
33 notice and this permission notice appear in all copies of the software,
34 derivative works or modified versions, and any portions thereof, and
35 that both notices appear in supporting documentation.
37 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39 RESULTING FROM THE USE OF THIS SOFTWARE.
41 This driver was written using the "Programmer's Reference Manual for
42 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
45 chas williams <chas@cmf.nrl.navy.mil>
46 eric kinzie <ekinzie@cmf.nrl.navy.mil>
49 4096 supported 'connections'
50 group 0 is used for all traffic
51 interrupt queue 0 is used for all interrupts
52 aal0 support (based on work from ulrich.u.muller@nokia.com)
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/slab.h>
72 #include <asm/byteorder.h>
73 #include <asm/uaccess.h>
75 #include <linux/atmdev.h>
76 #include <linux/atm.h>
77 #include <linux/sonet.h>
79 #undef USE_SCATTERGATHER
80 #undef USE_CHECKSUM_HW /* still confused about this */
85 #include <linux/atm_he.h>
/* Logging helpers. Both expand the identifier "he_dev" from the caller's
 * scope, so they may only be used where a local struct he_dev *he_dev exists. */
87 #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
/* Debug variant: the two HPRINTK definitions are the two arms of a debug
 * #ifdef whose directive lines are not visible in this listing — real
 * printk when debugging is enabled, a no-op statement otherwise. */
90 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92 #define HPRINTK(fmt,args...) do { } while (0)
/* Forward declarations: atmdev_ops callbacks (open/close/send/ioctl/proc),
 * the interrupt bottom/top halves, device bring-up/teardown, PHY register
 * accessors, and the EEPROM reader. */
97 static int he_open(struct atm_vcc *vcc);
98 static void he_close(struct atm_vcc *vcc);
99 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
100 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
101 static irqreturn_t he_irq_handler(int irq, void *dev_id);
102 static void he_tasklet(unsigned long data);
103 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
104 static int he_start(struct atm_dev *dev);
105 static void he_stop(struct he_dev *dev);
106 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
107 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
/* Module-scope state and tunables. */
113 static struct he_dev *he_devs;
/* disable64: force 32-bit PCI transfers even on a 64-bit bus (see he_start). */
114 static int disable64;
/* nvpibits/nvcibits: VPI/VCI bit split; -1 means "use driver default"
 * (presumably module parameters — the module_param lines are not visible
 * in this listing; confirm against the full source). */
115 static short nvpibits = -1;
116 static short nvcibits = -1;
117 static short rx_skb_reserve = 16;
/* irq_coalesce: when nonzero, he_init_group programs RBRQ interrupt
 * coalescing (time 768 / count 7) instead of per-buffer interrupts. */
118 static int irq_coalesce = 1;
/* Bit-bang sequence that clocks the EEPROM READ opcode (binary 0000 0011)
 * out on the serial line. NOTE(review): this listing shows only the first
 * and last table entries; the intermediate initializer lines are missing. */
121 /* Read from EEPROM = 0000 0011b */
122 static unsigned int readtab[] = {
137 CLK_HIGH | SI_HIGH, /* 1 */
139 CLK_HIGH | SI_HIGH /* 1 */
/* Clock waveform used while shifting EEPROM data bits in/out.
 * NOTE(review): the initializer body is missing from this listing. */
142 /* Clock to read from/write to the EEPROM */
143 static unsigned int clocktab[] = {
/* atmdev_ops vtable registered with atm_dev_register() in he_init_one().
 * NOTE(review): only the phy/proc members are visible here; the
 * open/close/ioctl/send members are in lines missing from this listing. */
163 static struct atmdev_ops he_ops =
169 .phy_put = he_phy_put,
170 .phy_get = he_phy_get,
171 .proc_read = he_proc_read,
/* MMIO accessors for the card's register window. he_writel issues a write
 * barrier (wmb) after writel so posted writes are ordered before any
 * subsequent access; he_readl is a plain readl. */
175 #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
176 #define he_readl(dev, reg) readl((dev)->membase + (reg))
178 /* section 2.12 connection memory access */
/* Indirect write into connection memory (RCM/TCM/mailbox selected by
 * "flags"): load CON_DAT, read it back to flush the posted write, then
 * kick CON_CTL and spin until the controller clears the BUSY bit.
 * NOTE(review): the trailing parameter line, braces, and any locking
 * requirements are in lines missing from this listing; the busy-wait has
 * no timeout, matching the original driver. */
180 static __inline__ void
181 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
184 he_writel(he_dev, val, CON_DAT);
185 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
186 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
187 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
/* Convenience wrappers: indirect writes targeting receive connection
 * memory (RCM), transmit connection memory (TCM), and the mailbox. */
190 #define he_writel_rcm(dev, val, reg) \
191 he_writel_internal(dev, val, reg, CON_CTL_RCM)
193 #define he_writel_tcm(dev, val, reg) \
194 he_writel_internal(dev, val, reg, CON_CTL_TCM)
196 #define he_writel_mbox(dev, val, reg) \
197 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
/* Indirect read from connection memory: issue a READ command via CON_CTL,
 * spin until BUSY clears, then fetch the result from CON_DAT.
 * NOTE(review): the return-type line and braces are missing from this
 * listing; presumably "static __inline__ unsigned" — confirm upstream. */
200 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
203 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
204 return he_readl(he_dev, CON_DAT);
/* Read-side counterparts of he_writel_rcm/tcm/mbox. */
207 #define he_readl_rcm(dev, reg) \
208 he_readl_internal(dev, reg, CON_CTL_RCM)
210 #define he_readl_tcm(dev, reg) \
211 he_readl_internal(dev, reg, CON_CTL_TCM)
213 #define he_readl_mbox(dev, reg) \
214 he_readl_internal(dev, reg, CON_CTL_MBOX)
/* figure 2.2 connection id
 *
 * Pack (vpi, vci) into the 13-bit connection id used to index the
 * per-connection state registers: vpi occupies the bits above the
 * device's vcibits, and the result is masked to 13 bits (0x1fff).
 *
 * Fix: parenthesize the macro parameters so expansions with non-trivial
 * argument expressions (e.g. "a | b" as vpi) keep the intended
 * precedence (CERT PRE01-C); behavior is unchanged for simple arguments.
 */
#define he_mkcid(dev, vpi, vci)		((((vpi) << (dev)->vcibits) | (vci)) & 0x1fff)
221 /* 2.5.1 per connection transmit state registers */
/* TSRA bank: 8 words per connection (cid << 3), TSR0..TSR7. */
223 #define he_writel_tsr0(dev, val, cid) \
224 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
225 #define he_readl_tsr0(dev, cid) \
226 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228 #define he_writel_tsr1(dev, val, cid) \
229 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231 #define he_writel_tsr2(dev, val, cid) \
232 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234 #define he_writel_tsr3(dev, val, cid) \
235 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237 #define he_writel_tsr4(dev, val, cid) \
238 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
242 * NOTE While the transmit connection is active, bits 23 through 0
243 * of this register must not be written by the host. Byte
244 * enables should be used during normal operation when writing
245 * the most significant byte.
/* tsr4_upper: writes only the most significant byte of TSR4 — byte
 * enables 0..2 are disabled so bits 23..0 are left untouched while the
 * connection is active (per the manual note above). */
248 #define he_writel_tsr4_upper(dev, val, cid) \
249 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 | CON_BYTE_DISABLE_2 \
252 | CON_BYTE_DISABLE_1 \
253 | CON_BYTE_DISABLE_0)
255 #define he_readl_tsr4(dev, cid) \
256 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258 #define he_writel_tsr5(dev, val, cid) \
259 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261 #define he_writel_tsr6(dev, val, cid) \
262 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264 #define he_writel_tsr7(dev, val, cid) \
265 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
/* TSRB bank: 4 words per connection (cid << 2), TSR8..TSR11. */
268 #define he_writel_tsr8(dev, val, cid) \
269 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271 #define he_writel_tsr9(dev, val, cid) \
272 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274 #define he_writel_tsr10(dev, val, cid) \
275 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277 #define he_writel_tsr11(dev, val, cid) \
278 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
/* TSRC bank: 2 words per connection (cid << 1), TSR12..TSR13. */
281 #define he_writel_tsr12(dev, val, cid) \
282 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284 #define he_writel_tsr13(dev, val, cid) \
285 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
/* TSRD bank: 1 word per connection, TSR14; the _upper variant again
 * restricts the write to the most significant byte via byte enables. */
288 #define he_writel_tsr14(dev, val, cid) \
289 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291 #define he_writel_tsr14_upper(dev, val, cid) \
292 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 | CON_BYTE_DISABLE_2 \
295 | CON_BYTE_DISABLE_1 \
296 | CON_BYTE_DISABLE_0)
298 /* 2.7.1 per connection receive state registers */
/* RSR bank in receive connection memory: 8 words per connection
 * (cid << 3), RSR0..RSR7; only RSR0 has a read accessor here. */
300 #define he_writel_rsr0(dev, val, cid) \
301 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
302 #define he_readl_rsr0(dev, cid) \
303 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305 #define he_writel_rsr1(dev, val, cid) \
306 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308 #define he_writel_rsr2(dev, val, cid) \
309 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311 #define he_writel_rsr3(dev, val, cid) \
312 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314 #define he_writel_rsr4(dev, val, cid) \
315 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317 #define he_writel_rsr5(dev, val, cid) \
318 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320 #define he_writel_rsr6(dev, val, cid) \
321 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323 #define he_writel_rsr7(dev, val, cid) \
324 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
/* Reverse-map a hardware connection id to the open receive VCC: split the
 * cid back into (vpi, vci), then walk the global vcc_hash bucket for a vcc
 * on this device with a matching pair and an active RX traffic class.
 * NOTE(review): the return statements, braces, and several declarations
 * are in lines missing from this listing; presumably the caller must hold
 * the appropriate vcc hash lock — confirm against the full source. */
326 static __inline__ struct atm_vcc*
327 __find_vcc(struct he_dev *he_dev, unsigned cid)
329 struct hlist_head *head;
331 struct hlist_node *node;
336 vpi = cid >> he_dev->vcibits;
337 vci = cid & ((1 << he_dev->vcibits) - 1);
338 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
340 sk_for_each(s, node, head) {
342 if (vcc->dev == he_dev->atm_dev &&
343 vcc->vci == vci && vcc->vpi == vpi &&
344 vcc->qos.rxtp.traffic_class != ATM_NONE) {
/* PCI probe callback: enable the device, restrict DMA to 32 bits,
 * register an atm_dev, allocate and wire up the per-device he_dev
 * (tasklet + global spinlock), run he_start(), and link the device onto
 * the he_devs list. On any failure, control falls through to the
 * init_one_failure unwind (deregister atm_dev, disable PCI device).
 * NOTE(review): return statements, error-branch bodies, and several
 * lines are missing from this listing. */
352 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
354 struct atm_dev *atm_dev = NULL;
355 struct he_dev *he_dev = NULL;
358 printk(KERN_INFO "ATM he driver\n");
360 if (pci_enable_device(pci_dev))
362 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
363 printk(KERN_WARNING "he: no suitable dma available\n");
365 goto init_one_failure;
368 atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
371 goto init_one_failure;
373 pci_set_drvdata(pci_dev, atm_dev);
375 he_dev = kzalloc(sizeof(struct he_dev),
379 goto init_one_failure;
381 he_dev->pci_dev = pci_dev;
382 he_dev->atm_dev = atm_dev;
/* dev_data is set twice through the two aliases; both point at he_dev. */
383 he_dev->atm_dev->dev_data = he_dev;
384 atm_dev->dev_data = he_dev;
385 he_dev->number = atm_dev->number;
386 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387 spin_lock_init(&he_dev->global_lock);
389 if (he_start(atm_dev)) {
392 goto init_one_failure;
/* push onto the module-global singly-linked device list */
396 he_dev->next = he_devs;
/* init_one_failure unwind path: */
402 atm_dev_deregister(atm_dev);
404 pci_disable_device(pci_dev);
/* PCI remove callback: recover he_dev from drvdata, deregister the ATM
 * device, clear drvdata, and disable the PCI device. The in-source TODO
 * about unlinking from he_devs is original — the list entry is not
 * removed here. NOTE(review): he_stop() and some lines are missing from
 * this listing. */
408 static void __devexit
409 he_remove_one (struct pci_dev *pci_dev)
411 struct atm_dev *atm_dev;
412 struct he_dev *he_dev;
414 atm_dev = pci_get_drvdata(pci_dev);
415 he_dev = HE_DEV(atm_dev);
417 /* need to remove from he_devs */
420 atm_dev_deregister(atm_dev);
423 pci_set_drvdata(pci_dev, NULL);
424 pci_disable_device(pci_dev);
/* Convert a cell rate (cells per second) to the ATM Forum floating-point
 * encoding: normalize the mantissa below 0x400 while counting the
 * exponent, then pack NONZERO | exponent<<9 | 9-bit mantissa.
 * NOTE(review): the return-type line, exponent declaration, and the loop
 * body (increment/shift) are missing from this listing. */
429 rate_to_atmf(unsigned rate) /* cps to atm forum format */
431 #define NONZERO (1 << 14)
439 while (rate > 0x3ff) {
444 return (NONZERO | (exp << 9) | (rate & 0x1ff));
/* Seed receive local-buffer free pool 0: for each r0 buffer, write a
 * descriptor pair (buffer address in 32-byte units, next index) into the
 * RCM local-buffer map, walking row by row through local cell memory,
 * then program the pool head/tail/count registers.
 * NOTE(review): lbufd_index initialization, loop increments, and closing
 * braces are in lines missing from this listing. */
447 static void __devinit
448 he_init_rx_lbfp0(struct he_dev *he_dev)
450 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
451 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
452 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
453 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
456 lbm_offset = he_readl(he_dev, RCMLBM_BA);
458 he_writel(he_dev, lbufd_index, RLBF0_H);
460 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
462 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
464 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
465 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
467 if (++lbuf_count == lbufs_per_row) {
469 row_offset += he_dev->bytes_per_row;
474 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
475 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
/* Same as he_init_rx_lbfp0 but for receive free pool 1 (r1 rows); the
 * descriptor map offset starts after pool 0's entries (2 words each).
 * NOTE(review): same missing lines as the pool-0 variant. */
478 static void __devinit
479 he_init_rx_lbfp1(struct he_dev *he_dev)
481 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
482 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
483 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
484 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
487 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
489 he_writel(he_dev, lbufd_index, RLBF1_H);
491 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
493 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
495 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
496 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
498 if (++lbuf_count == lbufs_per_row) {
500 row_offset += he_dev->bytes_per_row;
505 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
506 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
/* Seed the transmit local-buffer free pool: descriptor indices continue
 * after both receive pools (r0 + r1), covering the tx rows of local cell
 * memory; programs TLBF head and tail (no count register on tx).
 * NOTE(review): loop increments and closing braces are missing from this
 * listing. */
509 static void __devinit
510 he_init_tx_lbfp(struct he_dev *he_dev)
512 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
513 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
514 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
515 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
517 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
518 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
520 he_writel(he_dev, lbufd_index, TLBF_H);
522 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
524 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
526 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
527 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
529 if (++lbuf_count == lbufs_per_row) {
531 row_offset += he_dev->bytes_per_row;
536 he_writel(he_dev, lbufd_index - 1, TLBF_T);
/* Allocate and zero the transmit packet descriptor ready queue (TPDRQ)
 * in coherent DMA memory, point head and tail at its start, and program
 * the base/tail/size registers. Returns an error on allocation failure
 * (return lines are missing from this listing).
 * NOTE(review): the return-type line and braces are also missing. */
540 he_init_tpdrq(struct he_dev *he_dev)
542 he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
543 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
544 if (he_dev->tpdrq_base == NULL) {
545 hprintk("failed to alloc tpdrq\n");
548 memset(he_dev->tpdrq_base, 0,
549 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
551 he_dev->tpdrq_tail = he_dev->tpdrq_base;
552 he_dev->tpdrq_head = he_dev->tpdrq_base;
554 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
555 he_writel(he_dev, 0, TPDRQ_T);
556 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
/* 5.1.7: initialize the cell scheduler (CS) block via mailbox writes —
 * clear the schedule timers, program the first rate-grid row from the
 * link rate, then load the ER thresholds / controls and work-conserving
 * rate limits from the manual's tables (one set for OC-12/622, one for
 * OC-3/155). NOTE(review): several lines (declarations of reg, rate
 * adjustment inside the loop, braces) are missing from this listing. */
561 static void __devinit
562 he_init_cs_block(struct he_dev *he_dev)
564 unsigned clock, rate, delta;
567 /* 5.1.7 cs block initialization */
569 for (reg = 0; reg < 0x20; ++reg)
570 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
572 /* rate grid timer reload values */
/* core clock: 66.667 MHz on 622 cards, 50 MHz on 155 cards */
574 clock = he_is622(he_dev) ? 66667000 : 50000000;
575 rate = he_dev->atm_dev->link_rate;
576 delta = rate / 16 / 2;
578 for (reg = 0; reg < 0x10; ++reg) {
579 /* 2.4 internal transmit function
581 * we initialize the first row in the rate grid.
582 * values are period (in clock cycles) of timer
584 unsigned period = clock / rate;
586 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
590 if (he_is622(he_dev)) {
591 /* table 5.2 (4 cells per lbuf) */
592 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
593 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
594 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
595 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
596 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
598 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
599 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
600 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
601 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
602 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
603 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
604 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
606 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
609 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
610 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
611 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
612 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
613 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
614 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
617 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
618 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
/* else branch: 155 Mbit (OC-3) card constants */
620 /* table 5.1 (4 cells per lbuf) */
621 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
622 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
623 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
624 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
625 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
627 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
628 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
629 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
630 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
631 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
632 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
633 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
635 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
638 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
639 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
640 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
641 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
642 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
643 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
646 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
647 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
650 he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
652 for (reg = 0; reg < 0x8; ++reg)
653 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
/* Build the rate-to-group lookup table in RCM ABR memory: clear the
 * rate-grid group and rate-controller tables, reconstruct a host-side
 * copy of the scheduler's 16x16 rate grid (row 0 = link rate stepped
 * down, each following row divided by 4 or 2), then for every ATM Forum
 * rate code pick the nearest grid index and a buffer limit and pack two
 * 16-bit entries per 32-bit RCM word at RTGTBL_OFFSET.
 * NOTE(review): many lines (return-type, kmalloc failure check, loop
 * braces, if/else bodies around the /4 vs /2 split and the buf_limit
 * assignments, kfree) are missing from this listing. */
658 he_init_cs_block_rcm(struct he_dev *he_dev)
660 unsigned (*rategrid)[16][16];
661 unsigned rate, delta;
664 unsigned rate_atmf, exp, man;
665 unsigned long long rate_cps;
666 int mult, buf, buf_limit = 4;
/* host-side scratch copy of the 16x16 rate grid */
668 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
672 /* initialize rate grid group table */
674 for (reg = 0x0; reg < 0xff; ++reg)
675 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
677 /* initialize rate controller groups */
679 for (reg = 0x100; reg < 0x1ff; ++reg)
680 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
682 /* initialize tNrm lookup table */
684 /* the manual makes reference to a routine in a sample driver
685 for proper configuration; fortunately, we only need this
686 in order to support abr connection */
688 /* initialize rate to group table */
690 rate = he_dev->atm_dev->link_rate;
694 * 2.4 transmit internal functions
696 * we construct a copy of the rate grid used by the scheduler
697 * in order to construct the rate to group table below
700 for (j = 0; j < 16; j++) {
701 (*rategrid)[0][j] = rate;
705 for (i = 1; i < 16; i++)
706 for (j = 0; j < 16; j++)
708 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
710 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
713 * 2.4 transmit internal function
715 * this table maps the upper 5 bits of exponent and mantissa
716 * of the atm forum representation of the rate into an index
721 while (rate_atmf < 0x400) {
722 man = (rate_atmf & 0x1f) << 4;
723 exp = rate_atmf >> 5;
726 instead of '/ 512', use '>> 9' to prevent a call
727 to divdu3 on x86 platforms
/* decode ATM Forum float: 2^exp * (1 + man/512) cells per second */
729 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
732 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
/* search the grid from the slowest entry up for the nearest rate */
734 for (i = 255; i > 0; i--)
735 if ((*rategrid)[i/16][i%16] >= rate_cps)
736 break; /* pick nearest rate instead? */
739 * each table entry is 16 bits: (rate grid index (8 bits)
740 * and a buffer limit (8 bits)
741 * there are two table entries in each 32-bit register
745 buf = rate_cps * he_dev->tx_numbuffs /
746 (he_dev->atm_dev->link_rate * 2);
748 /* this is pretty, but avoids _divdu3 and is mostly correct */
749 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
750 if (rate_cps > (272 * mult))
752 else if (rate_cps > (204 * mult))
754 else if (rate_cps > (136 * mult))
756 else if (rate_cps > (68 * mult))
/* pack two 16-bit (grid index, buffer limit) entries per word */
763 reg = (reg << 16) | ((i << 8) | buf);
765 #define RTGTBL_OFFSET 0x400
768 he_writel_rcm(he_dev, reg,
769 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
/* Set up one receive/transmit group: create the small (rbps) and large
 * (rbpl) DMA buffer pools, populate their free-buffer rings with loaned
 * buffers, and allocate+program the RX buffer ready queue (rbrq) and TX
 * buffer ready queue (tbrq). All failures unwind through the labelled
 * goto chain at the bottom, releasing everything acquired so far.
 * NOTE(review): the return-type line, several braces/returns, some
 * he_writel lines for the QI registers, and the backwards for-loops in
 * the unwind path are missing from this listing. */
779 he_init_group(struct he_dev *he_dev, int group)
783 /* small buffer pool */
784 he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
785 CONFIG_RBPS_BUFSIZE, 8, 0);
786 if (he_dev->rbps_pool == NULL) {
787 hprintk("unable to create rbps pages\n");
791 he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
792 CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
793 if (he_dev->rbps_base == NULL) {
794 hprintk("failed to alloc rbps_base\n");
795 goto out_destroy_rbps_pool;
797 memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
798 he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
799 if (he_dev->rbps_virt == NULL) {
800 hprintk("failed to alloc rbps_virt\n");
801 goto out_free_rbps_base;
/* hand every small buffer to the card, marked LOANED with its ring index */
804 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
805 dma_addr_t dma_handle;
808 cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
810 goto out_free_rbps_virt;
812 he_dev->rbps_virt[i].virt = cpuaddr;
813 he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
814 he_dev->rbps_base[i].phys = dma_handle;
817 he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
/* program small-pool start/tail/bufsize/queue-info registers (stride 32) */
819 he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
820 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
821 G0_RBPS_T + (group * 32));
822 he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
823 G0_RBPS_BS + (group * 32));
825 RBP_THRESH(CONFIG_RBPS_THRESH) |
826 RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
828 G0_RBPS_QI + (group * 32));
830 /* large buffer pool */
831 he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
832 CONFIG_RBPL_BUFSIZE, 8, 0);
833 if (he_dev->rbpl_pool == NULL) {
834 hprintk("unable to create rbpl pool\n");
835 goto out_free_rbps_virt;
838 he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
839 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
840 if (he_dev->rbpl_base == NULL) {
841 hprintk("failed to alloc rbpl_base\n");
842 goto out_destroy_rbpl_pool;
844 memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
845 he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
846 if (he_dev->rbpl_virt == NULL) {
847 hprintk("failed to alloc rbpl_virt\n");
848 goto out_free_rbpl_base;
851 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
852 dma_addr_t dma_handle;
855 cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
857 goto out_free_rbpl_virt;
859 he_dev->rbpl_virt[i].virt = cpuaddr;
860 he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
861 he_dev->rbpl_base[i].phys = dma_handle;
863 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
865 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
866 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
867 G0_RBPL_T + (group * 32));
868 he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
869 G0_RBPL_BS + (group * 32));
871 RBP_THRESH(CONFIG_RBPL_THRESH) |
872 RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
874 G0_RBPL_QI + (group * 32));
876 /* rx buffer ready queue */
878 he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
879 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
880 if (he_dev->rbrq_base == NULL) {
881 hprintk("failed to allocate rbrq\n");
882 goto out_free_rbpl_virt;
884 memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
886 he_dev->rbrq_head = he_dev->rbrq_base;
887 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
888 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
890 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
891 G0_RBRQ_Q + (group * 16));
/* irq_coalesce selects between coalesced and per-buffer RX interrupts */
893 hprintk("coalescing interrupts\n");
894 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
895 G0_RBRQ_I + (group * 16));
897 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
898 G0_RBRQ_I + (group * 16));
900 /* tx buffer ready queue */
902 he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
903 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
904 if (he_dev->tbrq_base == NULL) {
905 hprintk("failed to allocate tbrq\n");
906 goto out_free_rbpq_base;
908 memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
910 he_dev->tbrq_head = he_dev->tbrq_base;
912 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
913 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
914 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
915 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
/* error-unwind chain: free in reverse order of acquisition */
920 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
921 sizeof(struct he_rbrq), he_dev->rbrq_base,
923 i = CONFIG_RBPL_SIZE;
926 pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
927 he_dev->rbpl_base[i].phys);
928 kfree(he_dev->rbpl_virt);
931 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
932 sizeof(struct he_rbp), he_dev->rbpl_base,
934 out_destroy_rbpl_pool:
935 pci_pool_destroy(he_dev->rbpl_pool);
937 i = CONFIG_RBPS_SIZE;
940 pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt,
941 he_dev->rbps_base[i].phys);
942 kfree(he_dev->rbps_virt);
945 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE *
946 sizeof(struct he_rbp), he_dev->rbps_base,
948 out_destroy_rbps_pool:
949 pci_pool_destroy(he_dev->rbps_pool);
/* Set up interrupt queue 0 (the only one used): allocate CONFIG_IRQ_SIZE
 * entries plus one trailing word the hardware uses as the tail offset,
 * mark every slot ITYPE_INVALID, program queue 0's base/size/threshold
 * and control registers, zero out queues 1-3 and the group mapping
 * registers (everything maps to queue 0), then request the PCI irq line.
 * NOTE(review): the return-type line, some return statements, and a few
 * register-write lines are missing from this listing. */
954 he_init_irq(struct he_dev *he_dev)
958 /* 2.9.3.5 tail offset for each interrupt queue is located after the
959 end of the interrupt queue */
961 he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
962 (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
963 if (he_dev->irq_base == NULL) {
964 hprintk("failed to allocate irq\n");
967 he_dev->irq_tailoffset = (unsigned *)
968 &he_dev->irq_base[CONFIG_IRQ_SIZE];
969 *he_dev->irq_tailoffset = 0;
970 he_dev->irq_head = he_dev->irq_base;
971 he_dev->irq_tail = he_dev->irq_base;
973 for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
974 he_dev->irq_base[i].isw = ITYPE_INVALID;
976 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
978 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
980 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
981 he_writel(he_dev, 0x0, IRQ0_DATA);
/* queues 1-3 are unused: clear their registers */
983 he_writel(he_dev, 0x0, IRQ1_BASE);
984 he_writel(he_dev, 0x0, IRQ1_HEAD);
985 he_writel(he_dev, 0x0, IRQ1_CNTL);
986 he_writel(he_dev, 0x0, IRQ1_DATA);
988 he_writel(he_dev, 0x0, IRQ2_BASE);
989 he_writel(he_dev, 0x0, IRQ2_HEAD);
990 he_writel(he_dev, 0x0, IRQ2_CNTL);
991 he_writel(he_dev, 0x0, IRQ2_DATA);
993 he_writel(he_dev, 0x0, IRQ3_BASE);
994 he_writel(he_dev, 0x0, IRQ3_HEAD);
995 he_writel(he_dev, 0x0, IRQ3_CNTL);
996 he_writel(he_dev, 0x0, IRQ3_DATA);
998 /* 2.9.3.2 interrupt queue mapping registers */
1000 he_writel(he_dev, 0x0, GRP_10_MAP);
1001 he_writel(he_dev, 0x0, GRP_32_MAP);
1002 he_writel(he_dev, 0x0, GRP_54_MAP);
1003 he_writel(he_dev, 0x0, GRP_76_MAP);
1005 if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
1006 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
1010 he_dev->irq = he_dev->pci_dev->irq;
1015 static int __devinit
1016 he_start(struct atm_dev *dev)
1018 struct he_dev *he_dev;
1019 struct pci_dev *pci_dev;
1020 unsigned long membase;
1023 u32 gen_cntl_0, host_cntl, lb_swap;
1024 u8 cache_size, timer;
1027 unsigned int status, reg;
1030 he_dev = HE_DEV(dev);
1031 pci_dev = he_dev->pci_dev;
1033 membase = pci_resource_start(pci_dev, 0);
1034 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
1037 * pci bus controller initialization
1040 /* 4.3 pci bus controller-specific initialization */
1041 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1042 hprintk("can't read GEN_CNTL_0\n");
1045 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1046 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1047 hprintk("can't write GEN_CNTL_0.\n");
1051 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1052 hprintk("can't read PCI_COMMAND.\n");
1056 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1057 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1058 hprintk("can't enable memory.\n");
1062 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1063 hprintk("can't read cache line size?\n");
1067 if (cache_size < 16) {
1069 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1070 hprintk("can't set cache line size to %d\n", cache_size);
1073 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1074 hprintk("can't read latency timer?\n");
1080 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1082 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1083 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1086 #define LAT_TIMER 209
1087 if (timer < LAT_TIMER) {
1088 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1090 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1091 hprintk("can't set latency timer to %d\n", timer);
1094 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1095 hprintk("can't set up page mapping\n");
1099 /* 4.4 card reset */
1100 he_writel(he_dev, 0x0, RESET_CNTL);
1101 he_writel(he_dev, 0xff, RESET_CNTL);
1103 udelay(16*1000); /* 16 ms */
1104 status = he_readl(he_dev, RESET_CNTL);
1105 if ((status & BOARD_RST_STATUS) == 0) {
1106 hprintk("reset failed\n");
1110 /* 4.5 set bus width */
1111 host_cntl = he_readl(he_dev, HOST_CNTL);
1112 if (host_cntl & PCI_BUS_SIZE64)
1113 gen_cntl_0 |= ENBL_64;
1115 gen_cntl_0 &= ~ENBL_64;
1117 if (disable64 == 1) {
1118 hprintk("disabling 64-bit pci bus transfers\n");
1119 gen_cntl_0 &= ~ENBL_64;
1122 if (gen_cntl_0 & ENBL_64)
1123 hprintk("64-bit transfers enabled\n");
1125 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1127 /* 4.7 read prom contents */
1128 for (i = 0; i < PROD_ID_LEN; ++i)
1129 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1131 he_dev->media = read_prom_byte(he_dev, MEDIA);
1133 for (i = 0; i < 6; ++i)
1134 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1136 hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1138 he_dev->media & 0x40 ? "SM" : "MM",
1145 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1146 ATM_OC12_PCR : ATM_OC3_PCR;
1148 /* 4.6 set host endianess */
1149 lb_swap = he_readl(he_dev, LB_SWAP);
1150 if (he_is622(he_dev))
1151 lb_swap &= ~XFER_SIZE; /* 4 cells */
1153 lb_swap |= XFER_SIZE; /* 8 cells */
1155 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1157 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1158 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1159 #endif /* __BIG_ENDIAN */
1160 he_writel(he_dev, lb_swap, LB_SWAP);
1162 /* 4.8 sdram controller initialization */
1163 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1165 /* 4.9 initialize rnum value */
1166 lb_swap |= SWAP_RNUM_MAX(0xf);
1167 he_writel(he_dev, lb_swap, LB_SWAP);
1169 /* 4.10 initialize the interrupt queues */
1170 if ((err = he_init_irq(he_dev)) != 0)
1173 /* 4.11 enable pci bus controller state machines */
1174 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1175 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1176 he_writel(he_dev, host_cntl, HOST_CNTL);
1178 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1179 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1182 * atm network controller initialization
1185 /* 5.1.1 generic configuration state */
1188 * local (cell) buffer memory map
1192 * 0 ____________1023 bytes 0 _______________________2047 bytes
1194 * | utility | | rx0 | |
1195 * 5|____________| 255|___________________| u |
1198 * | rx0 | row | tx | l |
1200 * | | 767|___________________| t |
1201 * 517|____________| 768| | y |
1202 * row 518| | | rx1 | |
1203 * | | 1023|___________________|___|
1208 * 1535|____________|
1211 * 2047|____________|
1215 /* total 4096 connections */
1216 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1217 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1219 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1220 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1224 if (nvpibits != -1) {
1225 he_dev->vpibits = nvpibits;
1226 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1229 if (nvcibits != -1) {
1230 he_dev->vcibits = nvcibits;
1231 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1235 if (he_is622(he_dev)) {
1236 he_dev->cells_per_row = 40;
1237 he_dev->bytes_per_row = 2048;
1238 he_dev->r0_numrows = 256;
1239 he_dev->tx_numrows = 512;
1240 he_dev->r1_numrows = 256;
1241 he_dev->r0_startrow = 0;
1242 he_dev->tx_startrow = 256;
1243 he_dev->r1_startrow = 768;
1245 he_dev->cells_per_row = 20;
1246 he_dev->bytes_per_row = 1024;
1247 he_dev->r0_numrows = 512;
1248 he_dev->tx_numrows = 1018;
1249 he_dev->r1_numrows = 512;
1250 he_dev->r0_startrow = 6;
1251 he_dev->tx_startrow = 518;
1252 he_dev->r1_startrow = 1536;
1255 he_dev->cells_per_lbuf = 4;
1256 he_dev->buffer_limit = 4;
1257 he_dev->r0_numbuffs = he_dev->r0_numrows *
1258 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1259 if (he_dev->r0_numbuffs > 2560)
1260 he_dev->r0_numbuffs = 2560;
1262 he_dev->r1_numbuffs = he_dev->r1_numrows *
1263 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1264 if (he_dev->r1_numbuffs > 2560)
1265 he_dev->r1_numbuffs = 2560;
1267 he_dev->tx_numbuffs = he_dev->tx_numrows *
1268 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1269 if (he_dev->tx_numbuffs > 5120)
1270 he_dev->tx_numbuffs = 5120;
1272 /* 5.1.2 configure hardware dependent registers */
1275 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1276 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1277 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1278 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1281 he_writel(he_dev, BANK_ON |
1282 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1286 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1287 RM_RW_WAIT(1), RCMCONFIG);
1289 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1290 TM_RW_WAIT(1), TCMCONFIG);
1292 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1295 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1296 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1297 RX_VALVP(he_dev->vpibits) |
1298 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1300 he_writel(he_dev, DRF_THRESH(0x20) |
1301 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1302 TX_VCI_MASK(he_dev->vcibits) |
1303 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1305 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1307 he_writel(he_dev, PHY_INT_ENB |
1308 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1311 /* 5.1.3 initialize connection memory */
1313 for (i = 0; i < TCM_MEM_SIZE; ++i)
1314 he_writel_tcm(he_dev, 0, i);
1316 for (i = 0; i < RCM_MEM_SIZE; ++i)
1317 he_writel_rcm(he_dev, 0, i);
1320 * transmit connection memory map
1323 * 0x0 ___________________
1329 * 0x8000|___________________|
1332 * 0xc000|___________________|
1335 * 0xe000|___________________|
1337 * 0xf000|___________________|
1339 * 0x10000|___________________|
1342 * |___________________|
1345 * 0x1ffff|___________________|
1350 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1351 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1352 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1353 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1354 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1358 * receive connection memory map
1360 * 0x0 ___________________
1366 * 0x8000|___________________|
1369 * | LBM | link lists of local
1370 * | tx | buffer memory
1372 * 0xd000|___________________|
1375 * 0xe000|___________________|
1378 * |___________________|
1381 * 0xffff|___________________|
1384 he_writel(he_dev, 0x08000, RCMLBM_BA);
1385 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1386 he_writel(he_dev, 0x0d800, RCMABR_BA);
1388 /* 5.1.4 initialize local buffer free pools linked lists */
1390 he_init_rx_lbfp0(he_dev);
1391 he_init_rx_lbfp1(he_dev);
1393 he_writel(he_dev, 0x0, RLBC_H);
1394 he_writel(he_dev, 0x0, RLBC_T);
1395 he_writel(he_dev, 0x0, RLBC_H2);
1397 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1398 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1400 he_init_tx_lbfp(he_dev);
1402 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1404 /* 5.1.5 initialize intermediate receive queues */
1406 if (he_is622(he_dev)) {
1407 he_writel(he_dev, 0x000f, G0_INMQ_S);
1408 he_writel(he_dev, 0x200f, G0_INMQ_L);
1410 he_writel(he_dev, 0x001f, G1_INMQ_S);
1411 he_writel(he_dev, 0x201f, G1_INMQ_L);
1413 he_writel(he_dev, 0x002f, G2_INMQ_S);
1414 he_writel(he_dev, 0x202f, G2_INMQ_L);
1416 he_writel(he_dev, 0x003f, G3_INMQ_S);
1417 he_writel(he_dev, 0x203f, G3_INMQ_L);
1419 he_writel(he_dev, 0x004f, G4_INMQ_S);
1420 he_writel(he_dev, 0x204f, G4_INMQ_L);
1422 he_writel(he_dev, 0x005f, G5_INMQ_S);
1423 he_writel(he_dev, 0x205f, G5_INMQ_L);
1425 he_writel(he_dev, 0x006f, G6_INMQ_S);
1426 he_writel(he_dev, 0x206f, G6_INMQ_L);
1428 he_writel(he_dev, 0x007f, G7_INMQ_S);
1429 he_writel(he_dev, 0x207f, G7_INMQ_L);
1431 he_writel(he_dev, 0x0000, G0_INMQ_S);
1432 he_writel(he_dev, 0x0008, G0_INMQ_L);
1434 he_writel(he_dev, 0x0001, G1_INMQ_S);
1435 he_writel(he_dev, 0x0009, G1_INMQ_L);
1437 he_writel(he_dev, 0x0002, G2_INMQ_S);
1438 he_writel(he_dev, 0x000a, G2_INMQ_L);
1440 he_writel(he_dev, 0x0003, G3_INMQ_S);
1441 he_writel(he_dev, 0x000b, G3_INMQ_L);
1443 he_writel(he_dev, 0x0004, G4_INMQ_S);
1444 he_writel(he_dev, 0x000c, G4_INMQ_L);
1446 he_writel(he_dev, 0x0005, G5_INMQ_S);
1447 he_writel(he_dev, 0x000d, G5_INMQ_L);
1449 he_writel(he_dev, 0x0006, G6_INMQ_S);
1450 he_writel(he_dev, 0x000e, G6_INMQ_L);
1452 he_writel(he_dev, 0x0007, G7_INMQ_S);
1453 he_writel(he_dev, 0x000f, G7_INMQ_L);
1456 /* 5.1.6 application tunable parameters */
1458 he_writel(he_dev, 0x0, MCC);
1459 he_writel(he_dev, 0x0, OEC);
1460 he_writel(he_dev, 0x0, DCC);
1461 he_writel(he_dev, 0x0, CEC);
1463 /* 5.1.7 cs block initialization */
1465 he_init_cs_block(he_dev);
1467 /* 5.1.8 cs block connection memory initialization */
1469 if (he_init_cs_block_rcm(he_dev) < 0)
1472 /* 5.1.10 initialize host structures */
1474 he_init_tpdrq(he_dev);
1476 he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1477 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1478 if (he_dev->tpd_pool == NULL) {
1479 hprintk("unable to create tpd pci_pool\n");
1483 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1485 if (he_init_group(he_dev, 0) != 0)
1488 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1489 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1490 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1491 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1492 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1493 G0_RBPS_BS + (group * 32));
1495 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1496 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1497 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1498 G0_RBPL_QI + (group * 32));
1499 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1501 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1502 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1503 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1504 G0_RBRQ_Q + (group * 16));
1505 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1507 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1508 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1509 he_writel(he_dev, TBRQ_THRESH(0x1),
1510 G0_TBRQ_THRESH + (group * 16));
1511 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1514 /* host status page */
1516 he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1517 sizeof(struct he_hsp), &he_dev->hsp_phys);
1518 if (he_dev->hsp == NULL) {
1519 hprintk("failed to allocate host status page\n");
1522 memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1523 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1525 /* initialize framer */
1527 #ifdef CONFIG_ATM_HE_USE_SUNI
1528 if (he_isMM(he_dev))
1529 suni_init(he_dev->atm_dev);
1530 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1531 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1532 #endif /* CONFIG_ATM_HE_USE_SUNI */
1535 /* this really should be in suni.c but for now... */
1538 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1539 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1540 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1541 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1544 /* 5.1.12 enable transmit and receive */
1546 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1547 reg |= TX_ENABLE|ER_ENABLE;
1548 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1550 reg = he_readl(he_dev, RC_CONFIG);
1552 he_writel(he_dev, reg, RC_CONFIG);
1554 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1555 he_dev->cs_stper[i].inuse = 0;
1556 he_dev->cs_stper[i].pcr = -1;
1558 he_dev->total_bw = 0;
1561 /* atm linux initialization */
1563 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1564 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1566 he_dev->irq_peak = 0;
1567 he_dev->rbrq_peak = 0;
1568 he_dev->rbpl_peak = 0;
1569 he_dev->tbrq_peak = 0;
1571 HPRINTK("hell bent for leather!\n");
/*
 * he_stop() -- quiesce the adapter and release everything he_start()
 * acquired: mask interrupts, disable the rx/tx engines, stop the PHY,
 * then free the interrupt queue, host status page, receive buffer
 * pools/rings, transmit rings and TPD pool, disable PCI bus mastering
 * and unmap the register window.  Each teardown step is guarded so the
 * function is safe on a partially-initialized he_dev.
 */
1577 he_stop(struct he_dev *he_dev)
1580 u32 gen_cntl_0, reg;
1581 struct pci_dev *pci_dev;
1583 pci_dev = he_dev->pci_dev;
1585 /* disable interrupts */
1587 if (he_dev->membase) {
1588 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1589 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1590 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1592 tasklet_disable(&he_dev->tasklet);
1594 /* disable recv and transmit */
1596 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1597 reg &= ~(TX_ENABLE|ER_ENABLE);
1598 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1600 reg = he_readl(he_dev, RC_CONFIG);
1601 reg &= ~(RX_ENABLE);
1602 he_writel(he_dev, reg, RC_CONFIG);
1605 #ifdef CONFIG_ATM_HE_USE_SUNI
1606 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1607 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1608 #endif /* CONFIG_ATM_HE_USE_SUNI */
1611 free_irq(he_dev->irq, he_dev);
1613 if (he_dev->irq_base)
1614 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1615 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1618 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1619 he_dev->hsp, he_dev->hsp_phys);
/* hand every per-entry pool buffer back before freeing the ring itself */
1621 if (he_dev->rbpl_base) {
1624 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
1625 void *cpuaddr = he_dev->rbpl_virt[i].virt;
1626 dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
1628 pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
1630 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1631 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1634 if (he_dev->rbpl_pool)
1635 pci_pool_destroy(he_dev->rbpl_pool);
1637 if (he_dev->rbps_base) {
1640 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
1641 void *cpuaddr = he_dev->rbps_virt[i].virt;
1642 dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
1644 pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
1646 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1647 * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
1650 if (he_dev->rbps_pool)
1651 pci_pool_destroy(he_dev->rbps_pool);
1653 if (he_dev->rbrq_base)
1654 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1655 he_dev->rbrq_base, he_dev->rbrq_phys);
1657 if (he_dev->tbrq_base)
1658 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1659 he_dev->tbrq_base, he_dev->tbrq_phys);
1661 if (he_dev->tpdrq_base)
/*
 * BUG FIX: tpdrq_base was being freed with the TBRQ's size constants
 * (CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq)) -- a copy-paste from the
 * branch above.  The TPDRQ entry (struct he_tpdrq: tpd + cid words, see
 * __enqueue_tpd) is a different type, so pci_free_consistent received
 * the wrong length for this mapping.  Free with the TPDRQ's own
 * size/type so it matches the allocation.
 */
1662 pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1663 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1665 if (he_dev->tpd_pool)
1666 pci_pool_destroy(he_dev->tpd_pool);
/* turn off memory-space access and bus mastering before unmapping */
1668 if (he_dev->pci_dev) {
1669 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1670 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1671 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1674 if (he_dev->membase)
1675 iounmap(he_dev->membase);
/*
 * __alloc_tpd() -- pop a transmit packet descriptor from the tpd
 * pci_pool (GFP_ATOMIC, usable under the global lock), record the
 * descriptor's own DMA address in its status word via TPD_ADDR(), and
 * clear the first three scatter/gather iovec slots.
 * NOTE(review): the allocation-failure check and the final `return tpd;`
 * fall outside this excerpt -- confirm against the full source.
 */
1678 static struct he_tpd *
1679 __alloc_tpd(struct he_dev *he_dev)
1682 dma_addr_t dma_handle;
1684 tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
1688 tpd->status = TPD_ADDR(dma_handle);
1690 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1691 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1692 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
/*
 * AAL5_LEN() -- read the big-endian 16-bit PDU length field out of an
 * AAL5 trailer.  The trailer occupies the last 8 bytes of the reassembled
 * PDU; the length bytes sit at offsets len-6 (high) and len-5 (low).
 */
#define AAL5_LEN(buf, len) \
	((((const unsigned char *)(buf))[(len) - 5]) | \
	 (((const unsigned char *)(buf))[(len) - 6] << 8))
/*
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 *
 * TCP_CKSUM() -- extract that 16-bit value from the last two bytes of
 * the PDU, big-endian (high byte at len-2, low byte at len-1).
 */
#define TCP_CKSUM(buf, len) \
	((((const unsigned char *)(buf))[(len) - 1]) | \
	 (((const unsigned char *)(buf))[(len) - 2] << 8))
/*
 * he_service_rbrq() -- drain the receive-buffer-return queue for @group.
 * Each RBRQ entry reports one buffer fragment returned by the adapter;
 * fragments are accumulated on the owning VCC's iovec list and, when the
 * END_PDU flag is seen, copied into a freshly charged skb and pushed up
 * the ATM stack.  Loaned buffers are given back by clearing RBP_LOANED.
 * Returns the number of PDUs assembled.  Called with he_dev->global_lock
 * held (the lock is dropped around vcc->push()); takes vcc_sklist_lock
 * for the CID -> vcc lookup.
 */
1712 he_service_rbrq(struct he_dev *he_dev, int group)
1714 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1715 ((unsigned long)he_dev->rbrq_base |
1716 he_dev->hsp->group[group].rbrq_tail);
1717 struct he_rbp *rbp = NULL;
1718 unsigned cid, lastcid = -1;
1719 unsigned buf_len = 0;
1720 struct sk_buff *skb;
1721 struct atm_vcc *vcc = NULL;
1722 struct he_vcc *he_vcc;
1723 struct he_iovec *iov;
1724 int pdus_assembled = 0;
1727 read_lock(&vcc_sklist_lock);
/* walk entries until the software head catches the adapter-reported tail */
1728 while (he_dev->rbrq_head != rbrq_tail) {
1731 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1732 he_dev->rbrq_head, group,
1733 RBRQ_ADDR(he_dev->rbrq_head),
1734 RBRQ_BUFLEN(he_dev->rbrq_head),
1735 RBRQ_CID(he_dev->rbrq_head),
1736 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1737 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1738 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1739 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1740 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1741 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
/* map the returned buffer address to its bookkeeping entry (small vs large pool) */
1743 if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1744 rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1746 rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
/* hardware reports the fragment length in 4-byte words */
1748 buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1749 cid = RBRQ_CID(he_dev->rbrq_head);
1752 vcc = __find_vcc(he_dev, cid);
/* no vcc for this cid: return the buffer (unless hardware says not to) and skip */
1756 hprintk("vcc == NULL (cid 0x%x)\n", cid);
1757 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1758 rbp->status &= ~RBP_LOANED;
1760 goto next_rbrq_entry;
1763 he_vcc = HE_VCC(vcc);
1764 if (he_vcc == NULL) {
1765 hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
1766 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1767 rbp->status &= ~RBP_LOANED;
1768 goto next_rbrq_entry;
1771 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1772 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1773 atomic_inc(&vcc->stats->rx_drop);
1774 goto return_host_buffers;
/* append this fragment to the per-VCC reassembly iovec list */
1777 he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1778 he_vcc->iov_tail->iov_len = buf_len;
1779 he_vcc->pdu_len += buf_len;
1782 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1784 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1785 wake_up(&he_vcc->rx_waitq);
1786 goto return_host_buffers;
1790 if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1791 hprintk("iovec full! cid 0x%x\n", cid);
1792 goto return_host_buffers;
/* keep collecting fragments until the adapter flags the end of the PDU */
1795 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1796 goto next_rbrq_entry;
1798 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1799 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1800 HPRINTK("%s%s (%d.%d)\n",
1801 RBRQ_CRC_ERR(he_dev->rbrq_head)
1803 RBRQ_LEN_ERR(he_dev->rbrq_head)
1805 vcc->vpi, vcc->vci);
1806 atomic_inc(&vcc->stats->rx_err);
1807 goto return_host_buffers;
/* charge the socket and allocate an skb big enough for the whole PDU */
1810 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1813 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1814 goto return_host_buffers;
1817 if (rx_skb_reserve > 0)
1818 skb_reserve(skb, rx_skb_reserve);
1820 __net_timestamp(skb);
/* copy each fragment out of the small/large pool buffers into the skb */
1822 for (iov = he_vcc->iov_head;
1823 iov < he_vcc->iov_tail; ++iov) {
1824 if (iov->iov_base & RBP_SMALLBUF)
1825 memcpy(skb_put(skb, iov->iov_len),
1826 he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1828 memcpy(skb_put(skb, iov->iov_len),
1829 he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
/* trim the skb to the real payload length, per AAL type */
1832 switch (vcc->qos.aal) {
1834 /* 2.10.1.5 raw cell receive */
1835 skb->len = ATM_AAL0_SDU;
1836 skb_set_tail_pointer(skb, skb->len);
1839 /* 2.10.1.2 aal5 receive */
1841 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1842 skb_set_tail_pointer(skb, skb->len);
1843 #ifdef USE_CHECKSUM_HW
1844 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1845 skb->ip_summed = CHECKSUM_COMPLETE;
1846 skb->csum = TCP_CKSUM(skb->data,
1853 #ifdef should_never_happen
1854 if (skb->len > vcc->qos.rxtp.max_sdu)
1855 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1859 ATM_SKB(skb)->vcc = vcc;
/* push may sleep/relock; drop the device lock around it */
1861 spin_unlock(&he_dev->global_lock);
1862 vcc->push(vcc, skb);
1863 spin_lock(&he_dev->global_lock);
1865 atomic_inc(&vcc->stats->rx);
/* return every loaned buffer on the iovec list to the adapter free pools */
1867 return_host_buffers:
1870 for (iov = he_vcc->iov_head;
1871 iov < he_vcc->iov_tail; ++iov) {
1872 if (iov->iov_base & RBP_SMALLBUF)
1873 rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1875 rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1877 rbp->status &= ~RBP_LOANED;
1880 he_vcc->iov_tail = he_vcc->iov_head;
1881 he_vcc->pdu_len = 0;
/* advance (and wrap) the software head past the consumed entry */
1884 he_dev->rbrq_head = (struct he_rbrq *)
1885 ((unsigned long) he_dev->rbrq_base |
1886 RBRQ_MASK(++he_dev->rbrq_head));
1889 read_unlock(&vcc_sklist_lock);
1892 if (updated > he_dev->rbrq_peak)
1893 he_dev->rbrq_peak = updated;
/* tell the adapter how far we consumed */
1895 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1896 G0_RBRQ_H + (group * 16));
1899 return pdus_assembled;
/*
 * he_service_tbrq() -- reclaim completed transmit descriptors from the
 * transmit-buffer-return queue for @group.  Each entry is matched (by
 * TPD DMA address) against the outstanding_tpds list; the TPD's
 * scatter/gather buffers are unmapped, its skb is popped or freed, any
 * closer waiting on end-of-session (EOS) is woken, and the TPD is
 * recycled to the pci_pool.  The consumed head is written back to
 * G0_TBRQ_H at the end.
 */
1903 he_service_tbrq(struct he_dev *he_dev, int group)
1905 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1906 ((unsigned long)he_dev->tbrq_base |
1907 he_dev->hsp->group[group].tbrq_tail);
1909 int slot, updated = 0;
1910 struct he_tpd *__tpd;
1912 /* 2.1.6 transmit buffer return queue */
1914 while (he_dev->tbrq_head != tbrq_tail) {
1917 HPRINTK("tbrq%d 0x%x%s%s\n",
1919 TBRQ_TPD(he_dev->tbrq_head),
1920 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1921 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
/* find the host-side TPD that owns this returned DMA address */
1923 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1924 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1926 list_del(&__tpd->entry);
1932 hprintk("unable to locate tpd for dma buffer %x\n",
1933 TBRQ_TPD(he_dev->tbrq_head));
1934 goto next_tbrq_entry;
/* EOS marks the flush TPD queued by he_close(); wake the closer */
1937 if (TBRQ_EOS(he_dev->tbrq_head)) {
1938 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1939 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1941 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1943 goto next_tbrq_entry;
/* unmap every populated scatter/gather slot */
1946 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1947 if (tpd->iovec[slot].addr)
1948 pci_unmap_single(he_dev->pci_dev,
1949 tpd->iovec[slot].addr,
1950 tpd->iovec[slot].len & TPD_LEN_MASK,
1952 if (tpd->iovec[slot].len & TPD_LST)
1957 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1958 if (tpd->vcc && tpd->vcc->pop)
1959 tpd->vcc->pop(tpd->vcc, tpd->skb);
1961 dev_kfree_skb_any(tpd->skb);
/* return the TPD to the pool and advance (wrapping) the head */
1966 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1967 he_dev->tbrq_head = (struct he_tbrq *)
1968 ((unsigned long) he_dev->tbrq_base |
1969 TBRQ_MASK(++he_dev->tbrq_head));
1973 if (updated > he_dev->tbrq_peak)
1974 he_dev->tbrq_peak = updated;
1976 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1977 G0_TBRQ_H + (group * 16));
/*
 * he_service_rbpl() -- replenish the large receive-buffer free pool.
 * Advances rbpl_tail toward the adapter's pool head (read from
 * G0_RBPL_S), marking each freshly loaned entry RBP_LOANED, and stops
 * when it would collide with the head or meets a buffer still loaned
 * out.  The new tail is then published to G0_RBPL_T so the adapter can
 * use the buffers.
 */
1983 he_service_rbpl(struct he_dev *he_dev, int group)
1985 struct he_rbp *newtail;
1986 struct he_rbp *rbpl_head;
1989 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1990 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1993 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1994 RBPL_MASK(he_dev->rbpl_tail+1));
1996 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1997 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2000 newtail->status |= RBP_LOANED;
2001 he_dev->rbpl_tail = newtail;
2006 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
/*
 * he_service_rbps() -- replenish the small receive-buffer free pool;
 * same algorithm as he_service_rbpl() but against the RBPS ring:
 * advance rbps_tail toward the adapter head from G0_RBPS_S, mark each
 * entry RBP_LOANED, and publish the new tail to G0_RBPS_T.
 */
2010 he_service_rbps(struct he_dev *he_dev, int group)
2012 struct he_rbp *newtail;
2013 struct he_rbp *rbps_head;
2016 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2017 RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2020 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2021 RBPS_MASK(he_dev->rbps_tail+1));
2023 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2024 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2027 newtail->status |= RBP_LOANED;
2028 he_dev->rbps_tail = newtail;
2033 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
/*
 * he_tasklet() -- bottom half scheduled by he_irq_handler().  Under
 * global_lock, walks the interrupt status words between irq_head and
 * irq_tail, dispatching each ITYPE to the matching service routine
 * (RBRQ/TBRQ/RBPL/RBPS, PHY).  Entries that were not yet updated by the
 * adapter (ITYPE_INVALID) trigger a poll of all group-0 queues per spec
 * section 8.1.1.  Consumed entries are invalidated and the new tail is
 * written back through IRQ0_HEAD.
 */
2037 he_tasklet(unsigned long data)
2039 unsigned long flags;
2040 struct he_dev *he_dev = (struct he_dev *) data;
2044 HPRINTK("tasklet (0x%lx)\n", data);
2045 spin_lock_irqsave(&he_dev->global_lock, flags);
2047 while (he_dev->irq_head != he_dev->irq_tail) {
/* decode the interrupt status word into event type + group number */
2050 type = ITYPE_TYPE(he_dev->irq_head->isw);
2051 group = ITYPE_GROUP(he_dev->irq_head->isw);
2054 case ITYPE_RBRQ_THRESH:
2055 HPRINTK("rbrq%d threshold\n", group);
2057 case ITYPE_RBRQ_TIMER:
/* after assembling PDUs, top up both receive buffer pools */
2058 if (he_service_rbrq(he_dev, group)) {
2059 he_service_rbpl(he_dev, group);
2060 he_service_rbps(he_dev, group);
2063 case ITYPE_TBRQ_THRESH:
2064 HPRINTK("tbrq%d threshold\n", group);
2066 case ITYPE_TPD_COMPLETE:
2067 he_service_tbrq(he_dev, group);
2069 case ITYPE_RBPL_THRESH:
2070 he_service_rbpl(he_dev, group);
2072 case ITYPE_RBPS_THRESH:
2073 he_service_rbps(he_dev, group);
2076 HPRINTK("phy interrupt\n");
2077 #ifdef CONFIG_ATM_HE_USE_SUNI
/* the PHY handler may sleep; drop the device lock around it */
2078 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2079 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2080 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2081 spin_lock_irqsave(&he_dev->global_lock, flags);
/* error/other types carry their payload in the group bits */
2085 switch (type|group) {
2087 hprintk("parity error\n");
2090 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2094 case ITYPE_TYPE(ITYPE_INVALID):
2095 /* see 8.1.1 -- check all queues */
2097 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2099 he_service_rbrq(he_dev, 0);
2100 he_service_rbpl(he_dev, 0);
2101 he_service_rbps(he_dev, 0);
2102 he_service_tbrq(he_dev, 0);
2105 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
/* mark the entry consumed so a stale read is detectable next pass */
2108 he_dev->irq_head->isw = ITYPE_INVALID;
2110 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2114 if (updated > he_dev->irq_peak)
2115 he_dev->irq_peak = updated;
2118 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2119 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2120 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2121 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2123 spin_unlock_irqrestore(&he_dev->global_lock, flags);
/*
 * he_irq_handler() -- hard interrupt handler.  Latches the adapter's
 * interrupt-queue tail from the DMA'd tailoffset word; if that appears
 * stale (tail == head), re-reads it from the IRQ0_BASE register as an
 * errata workaround.  When new entries are pending it schedules
 * he_tasklet() to do the real work, then acknowledges the interrupt via
 * INT_CLEAR_A and flushes posted writes.  Returns IRQ_RETVAL(handled).
 */
2127 he_irq_handler(int irq, void *dev_id)
2129 unsigned long flags;
2130 struct he_dev *he_dev = (struct he_dev * )dev_id;
2136 spin_lock_irqsave(&he_dev->global_lock, flags);
/* tailoffset is written by the adapter into host memory; scale to bytes */
2138 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2139 (*he_dev->irq_tailoffset << 2));
2141 if (he_dev->irq_tail == he_dev->irq_head) {
2142 HPRINTK("tailoffset not updated?\n");
2143 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2144 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2145 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2149 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2150 hprintk("spurious (or shared) interrupt?\n");
2153 if (he_dev->irq_head != he_dev->irq_tail) {
2155 tasklet_schedule(&he_dev->tasklet);
2156 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2157 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2159 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2160 return IRQ_RETVAL(handled);
/*
 * __enqueue_tpd() -- append @tpd for connection @cid to the transmit
 * packet descriptor ready queue (TPDRQ).  If the software tail would
 * collide with the cached head, the head is refreshed from the adapter
 * (TPDRQ_B_H); if the ring is genuinely full the PDU is dropped:
 * buffers unmapped, skb popped/freed, tx_err incremented, TPD recycled.
 * Otherwise the TPD is tracked on outstanding_tpds and the new tail is
 * published to TPDRQ_T.  Caller holds he_dev->global_lock.
 */
2164 static __inline__ void
2165 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2167 struct he_tpdrq *new_tail;
2169 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2170 tpd, cid, he_dev->tpdrq_tail);
2172 /* new_tail = he_dev->tpdrq_tail; */
2173 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2174 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2177 * check to see if we are about to set the tail == head
2178 * if true, update the head pointer from the adapter
2179 * to see if this is really the case (reading the queue
2180 * head for every enqueue would be unnecessarily slow)
2183 if (new_tail == he_dev->tpdrq_head) {
2184 he_dev->tpdrq_head = (struct he_tpdrq *)
2185 (((unsigned long)he_dev->tpdrq_base) |
2186 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2188 if (new_tail == he_dev->tpdrq_head) {
2191 hprintk("tpdrq full (cid 0x%x)\n", cid);
2194 * push tpd onto a transmit backlog queue
2195 * after service_tbrq, service the backlog
2196 * for now, we just drop the pdu
2198 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2199 if (tpd->iovec[slot].addr)
2200 pci_unmap_single(he_dev->pci_dev,
2201 tpd->iovec[slot].addr,
2202 tpd->iovec[slot].len & TPD_LEN_MASK,
2207 tpd->vcc->pop(tpd->vcc, tpd->skb);
2209 dev_kfree_skb_any(tpd->skb);
2210 atomic_inc(&tpd->vcc->stats->tx_err);
2212 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2217 /* 2.1.5 transmit packet descriptor ready queue */
2218 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2219 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2220 he_dev->tpdrq_tail->cid = cid;
2223 he_dev->tpdrq_tail = new_tail;
2225 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2226 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
/*
 * he_open() -- atm_dev ->open() hook: bring up the connection for @vcc.
 * Allocates the per-connection he_vcc state, then programs the transmit
 * side (TSR0..TSR14: UBR or CBR scheduling, AAL type, rate parameters)
 * and/or the receive side (RSR0/1/4: group, EPD/PPD, optional hardware
 * TCP checksum), each guarded by an idle-state check of the connection.
 * Registers are written under global_lock; RSR0's open/closed bit is
 * written last per spec section 5.1.11.
 */
2230 he_open(struct atm_vcc *vcc)
2232 unsigned long flags;
2233 struct he_dev *he_dev = HE_DEV(vcc->dev);
2234 struct he_vcc *he_vcc;
2236 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2237 short vpi = vcc->vpi;
2240 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2243 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2245 set_bit(ATM_VF_ADDR, &vcc->flags);
2247 cid = he_mkcid(he_dev, vpi, vci);
/* per-connection driver state: reassembly iovecs + close waitqueues */
2249 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2250 if (he_vcc == NULL) {
2251 hprintk("unable to allocate he_vcc during open\n");
2255 he_vcc->iov_tail = he_vcc->iov_head;
2256 he_vcc->pdu_len = 0;
2257 he_vcc->rc_index = -1;
2259 init_waitqueue_head(&he_vcc->rx_waitq);
2260 init_waitqueue_head(&he_vcc->tx_waitq);
2262 vcc->dev_data = he_vcc;
/* ---- transmit side setup ---- */
2264 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2267 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2269 pcr_goal = he_dev->atm_dev->link_rate;
2270 if (pcr_goal < 0) /* means round down, technically */
2271 pcr_goal = -pcr_goal;
2273 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2275 switch (vcc->qos.aal) {
2277 tsr0_aal = TSR0_AAL5;
2281 tsr0_aal = TSR0_AAL0_SDU;
2282 tsr4 = TSR4_AAL0_SDU;
/* refuse to reuse a connection the adapter still considers active */
2289 spin_lock_irqsave(&he_dev->global_lock, flags);
2290 tsr0 = he_readl_tsr0(he_dev, cid);
2291 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2293 if (TSR0_CONN_STATE(tsr0) != 0) {
2294 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2299 switch (vcc->qos.txtp.traffic_class) {
2301 /* 2.3.3.1 open connection ubr */
2303 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2304 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2308 /* 2.3.3.2 open connection cbr */
2310 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2311 if ((he_dev->total_bw + pcr_goal)
2312 > (he_dev->atm_dev->link_rate * 9 / 10))
2318 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2320 /* find an unused cs_stper register */
2321 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2322 if (he_dev->cs_stper[reg].inuse == 0 ||
2323 he_dev->cs_stper[reg].pcr == pcr_goal)
2326 if (reg == HE_NUM_CS_STPER) {
2328 spin_unlock_irqrestore(&he_dev->global_lock, flags);
/* reserve the bandwidth and bind this vcc to the rate register */
2332 he_dev->total_bw += pcr_goal;
2334 he_vcc->rc_index = reg;
2335 ++he_dev->cs_stper[reg].inuse;
2336 he_dev->cs_stper[reg].pcr = pcr_goal;
/* convert the cell rate into a scheduler period in core-clock ticks */
2338 clock = he_is622(he_dev) ? 66667000 : 50000000;
2339 period = clock / pcr_goal;
2341 HPRINTK("rc_index = %d period = %d\n",
2344 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2346 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2348 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
/* program the transmit state registers for this cid */
2357 spin_lock_irqsave(&he_dev->global_lock, flags);
2359 he_writel_tsr0(he_dev, tsr0, cid);
2360 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2361 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2362 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2363 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2364 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2366 he_writel_tsr3(he_dev, 0x0, cid);
2367 he_writel_tsr5(he_dev, 0x0, cid);
2368 he_writel_tsr6(he_dev, 0x0, cid);
2369 he_writel_tsr7(he_dev, 0x0, cid);
2370 he_writel_tsr8(he_dev, 0x0, cid);
2371 he_writel_tsr10(he_dev, 0x0, cid);
2372 he_writel_tsr11(he_dev, 0x0, cid);
2373 he_writel_tsr12(he_dev, 0x0, cid);
2374 he_writel_tsr13(he_dev, 0x0, cid);
2375 he_writel_tsr14(he_dev, 0x0, cid);
2376 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2377 spin_unlock_irqrestore(&he_dev->global_lock, flags);
/* ---- receive side setup ---- */
2380 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2383 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2384 &HE_VCC(vcc)->rx_waitq);
2386 switch (vcc->qos.aal) {
2398 spin_lock_irqsave(&he_dev->global_lock, flags);
2400 rsr0 = he_readl_rsr0(he_dev, cid);
2401 if (rsr0 & RSR0_OPEN_CONN) {
2402 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2404 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2409 rsr1 = RSR1_GROUP(0);
2410 rsr4 = RSR4_GROUP(0);
/* early/partial packet discard only makes sense for UBR traffic */
2411 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2412 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2414 #ifdef USE_CHECKSUM_HW
2415 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2416 rsr0 |= RSR0_TCP_CKSUM;
2419 he_writel_rsr4(he_dev, rsr4, cid);
2420 he_writel_rsr1(he_dev, rsr1, cid);
2421 /* 5.1.11 last parameter initialized should be
2422 the open/closed indication in rsr0 */
2423 he_writel_rsr0(he_dev,
2424 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2425 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2427 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2434 clear_bit(ATM_VF_ADDR, &vcc->flags);
2437 set_bit(ATM_VF_READY, &vcc->flags);
/*
 * he_close() -- atm_dev ->close() hook: tear down the connection for
 * @vcc.  Receive side: issue RSR0_CLOSE_CONN + the RXCON_CLOSE mailbox
 * command and sleep on rx_waitq until he_service_rbrq() sees CON_CLOSED
 * (30s timeout).  Transmit side: wait for in-flight socket buffers to
 * drain, flush the connection via TSR4, enqueue an EOS/interrupting TPD
 * and sleep on tx_waitq until he_service_tbrq() reports it, then poll
 * the session-ended/idle bits; for CBR, release the cs_stper rate
 * register and the reserved bandwidth.
 */
2443 he_close(struct atm_vcc *vcc)
2445 unsigned long flags;
2446 DECLARE_WAITQUEUE(wait, current);
2447 struct he_dev *he_dev = HE_DEV(vcc->dev);
2450 struct he_vcc *he_vcc = HE_VCC(vcc);
2451 #define MAX_RETRY 30
2452 int retry = 0, sleep = 1, tx_inuse;
2454 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2456 clear_bit(ATM_VF_READY, &vcc->flags);
2457 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
/* ---- receive side close ---- */
2459 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2462 HPRINTK("close rx cid 0x%x\n", cid);
2464 /* 2.7.2.2 close receive operation */
2466 /* wait for previous close (if any) to finish */
2468 spin_lock_irqsave(&he_dev->global_lock, flags);
2469 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2470 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
/* register on rx_waitq before issuing the close so the wakeup can't be lost */
2474 set_current_state(TASK_UNINTERRUPTIBLE);
2475 add_wait_queue(&he_vcc->rx_waitq, &wait);
2477 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2478 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2479 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2480 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2482 timeout = schedule_timeout(30*HZ);
2484 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2485 set_current_state(TASK_RUNNING);
2488 hprintk("close rx timeout cid 0x%x\n", cid);
2490 HPRINTK("close rx cid 0x%x complete\n", cid);
/* ---- transmit side close ---- */
2494 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2495 volatile unsigned tsr4, tsr0;
2498 HPRINTK("close tx cid 0x%x\n", cid);
2502 * ... the host must first stop queueing packets to the TPDRQ
2503 * on the connection to be closed, then wait for all outstanding
2504 * packets to be transmitted and their buffers returned to the
2505 * TBRQ. When the last packet on the connection arrives in the
2506 * TBRQ, the host issues the close command to the adapter.
/* sk_wmem_alloc > 1 means skbs for this socket are still in flight */
2509 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2510 (retry < MAX_RETRY)) {
2519 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2521 /* 2.3.1.1 generic close operations with flush */
2523 spin_lock_irqsave(&he_dev->global_lock, flags);
2524 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2525 /* also clears TSR4_SESSION_ENDED */
2527 switch (vcc->qos.txtp.traffic_class) {
/* UBR: park the connection at a minimal rate so the flush can complete */
2529 he_writel_tsr1(he_dev,
2530 TSR1_MCR(rate_to_atmf(200000))
2531 | TSR1_PCR(0), cid);
2534 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2537 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
/* queue a final EOS+interrupt TPD; he_service_tbrq() wakes tx_waitq on it */
2539 tpd = __alloc_tpd(he_dev);
2541 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2542 goto close_tx_incomplete;
2544 tpd->status |= TPD_EOS | TPD_INT;
2549 set_current_state(TASK_UNINTERRUPTIBLE);
2550 add_wait_queue(&he_vcc->tx_waitq, &wait);
2551 __enqueue_tpd(he_dev, tpd, cid);
2552 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2554 timeout = schedule_timeout(30*HZ);
2556 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2557 set_current_state(TASK_RUNNING);
2559 spin_lock_irqsave(&he_dev->global_lock, flags);
2562 hprintk("close tx timeout cid 0x%x\n", cid);
2563 goto close_tx_incomplete;
/* poll until the adapter reports the session ended and the cid is idle */
2566 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2567 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2571 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2572 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2576 close_tx_incomplete:
/* CBR: give back the rate register reference and the reserved bandwidth */
2578 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2579 int reg = he_vcc->rc_index;
2581 HPRINTK("cs_stper reg = %d\n", reg);
2583 if (he_dev->cs_stper[reg].inuse == 0)
2584 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2586 --he_dev->cs_stper[reg].inuse;
2588 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2590 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2592 HPRINTK("close tx cid 0x%x complete\n", cid);
2597 clear_bit(ATM_VF_ADDR, &vcc->flags);
/*
 * he_send() -- ATM device ->send hook: transmit one skb on a VCC.
 *
 * Rejects frames larger than one TPD buffer can describe (and AAL0
 * frames that are not exactly ATM_AAL0_SDU bytes), builds one or more
 * transmit packet descriptors (TPDs) and enqueues them on the
 * connection's TPDRQ while holding he_dev->global_lock.  Every failure
 * path frees the skb with dev_kfree_skb_any() and bumps
 * vcc->stats->tx_err; the success path bumps vcc->stats->tx.
 *
 * NOTE(review): this extract is elided -- several original lines
 * (returns, closing braces, #endif markers) are not visible here.
 */
2601 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2603 unsigned long flags;
2604 struct he_dev *he_dev = HE_DEV(vcc->dev);
2605 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2607 #ifdef USE_SCATTERGATHER
/* largest payload a single TPD buffer can carry (16-bit length field) */
2611 #define HE_TPD_BUFSIZE 0xffff
2613 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
/* size check: too big for one TPD, or AAL0 but not exactly one SDU */
2615 if ((skb->len > HE_TPD_BUFSIZE) ||
2616 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2617 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2621 dev_kfree_skb_any(skb);
2622 atomic_inc(&vcc->stats->tx_err);
/* without scatter/gather support, fragmented skbs cannot be sent */
2626 #ifndef USE_SCATTERGATHER
2627 if (skb_shinfo(skb)->nr_frags) {
2628 hprintk("no scatter/gather support\n");
2632 dev_kfree_skb_any(skb);
2633 atomic_inc(&vcc->stats->tx_err);
2637 spin_lock_irqsave(&he_dev->global_lock, flags);
2639 tpd = __alloc_tpd(he_dev);
/* TPD allocation failure: drop the frame and release the lock */
2644 dev_kfree_skb_any(skb);
2645 atomic_inc(&vcc->stats->tx_err);
2646 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2650 if (vcc->qos.aal == ATM_AAL5)
2651 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
/* AAL0: extract PTI/CLP from the 4th header byte of the raw cell */
2653 char *pti_clp = (void *) (skb->data + 3);
2656 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2657 clp = (*pti_clp & ATM_HDR_CLP);
2658 tpd->status |= TPD_CELLTYPE(pti);
2660 tpd->status |= TPD_CLP;
/* strip the cell header; the adapter only wants the 48-byte payload */
2662 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2665 #ifdef USE_SCATTERGATHER
/* map the linear part of the skb as the first iovec entry */
2666 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2667 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2668 tpd->iovec[slot].len = skb->len - skb->data_len;
/* then map each page fragment, chaining extra TPDs when iovecs run out */
2671 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2672 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2674 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2676 tpd->skb = NULL; /* not the last fragment
2677 so dont ->push() yet */
2680 __enqueue_tpd(he_dev, tpd, cid);
2681 tpd = __alloc_tpd(he_dev);
2686 dev_kfree_skb_any(skb);
2687 atomic_inc(&vcc->stats->tx_err);
2688 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2691 tpd->status |= TPD_USERCELL;
2695 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2696 (void *) page_address(frag->page) + frag->page_offset,
2697 frag->size, PCI_DMA_TODEVICE);
2698 tpd->iovec[slot].len = frag->size;
/* mark the final iovec entry as the last buffer of the packet */
2703 tpd->iovec[slot - 1].len |= TPD_LST;
/* non-scatter/gather path: one DMA mapping for the whole skb */
2705 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2706 tpd->length0 = skb->len | TPD_LST;
/* interrupt on completion so the TBRQ handler can free the skb */
2708 tpd->status |= TPD_INT;
2713 ATM_SKB(skb)->vcc = vcc;
2715 __enqueue_tpd(he_dev, tpd, cid);
2716 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2718 atomic_inc(&vcc->stats->tx);
/*
 * he_ioctl() -- device ioctl: privileged register peek interface.
 *
 * Copies a struct he_ioctl_reg from userspace, reads the requested
 * register from the selected address space (PCI / RCM / TCM / MBOX)
 * under he_dev->global_lock, and copies the result back to userspace.
 * Requires CAP_NET_ADMIN.  With CONFIG_ATM_HE_USE_SUNI, unhandled
 * commands are forwarded to the PHY driver's ioctl.
 *
 * FIX(review): "&reg" had been mis-encoded as the mojibake character
 * '®' (an HTML "&reg;" entity) in both copy_from_user() and
 * copy_to_user(); restored to the address-of expression.
 *
 * NOTE(review): this extract is elided -- the switch statement header,
 * error returns, and closing braces are not visible here.
 */
2724 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2726 unsigned long flags;
2727 struct he_dev *he_dev = HE_DEV(atm_dev);
2728 struct he_ioctl_reg reg;
/* register peeks are privileged */
2733 if (!capable(CAP_NET_ADMIN))
2736 if (copy_from_user(&reg, arg,
2737 sizeof(struct he_ioctl_reg)))
2740 spin_lock_irqsave(&he_dev->global_lock, flags);
2742 case HE_REGTYPE_PCI:
/* bounds-check the raw register offset before dereferencing */
2743 if (reg.addr >= HE_REGMAP_SIZE) {
2748 reg.val = he_readl(he_dev, reg.addr);
2750 case HE_REGTYPE_RCM:
2752 he_readl_rcm(he_dev, reg.addr);
2754 case HE_REGTYPE_TCM:
2756 he_readl_tcm(he_dev, reg.addr);
2758 case HE_REGTYPE_MBOX:
2760 he_readl_mbox(he_dev, reg.addr);
2766 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2768 if (copy_to_user(arg, &reg,
2769 sizeof(struct he_ioctl_reg)))
2773 #ifdef CONFIG_ATM_HE_USE_SUNI
/* forward unknown ioctls to the SUNI PHY driver when present */
2774 if (atm_dev->phy && atm_dev->phy->ioctl)
2775 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2776 #else /* CONFIG_ATM_HE_USE_SUNI */
2778 #endif /* CONFIG_ATM_HE_USE_SUNI */
/*
 * he_phy_put() -- write one byte to a PHY/framer register.
 *
 * Registers are word-spaced, so the byte address is scaled by 4 off
 * the FRAMER base.  Performed under he_dev->global_lock; the trailing
 * read flushes the posted write.
 *
 * NOTE(review): opening/closing braces and the return are elided in
 * this extract.
 */
2786 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2788 unsigned long flags;
2789 struct he_dev *he_dev = HE_DEV(atm_dev);
2791 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2793 spin_lock_irqsave(&he_dev->global_lock, flags);
2794 he_writel(he_dev, val, FRAMER + (addr*4));
2795 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2796 spin_unlock_irqrestore(&he_dev->global_lock, flags);
/*
 * he_phy_get() -- read one PHY/framer register (word-spaced off the
 * FRAMER base, see he_phy_put) under he_dev->global_lock.
 *
 * NOTE(review): the declaration of 'reg' and the return statement are
 * elided in this extract.
 */
2800 static unsigned char
2801 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2803 unsigned long flags;
2804 struct he_dev *he_dev = HE_DEV(atm_dev);
2807 spin_lock_irqsave(&he_dev->global_lock, flags);
2808 reg = he_readl(he_dev, FRAMER + (addr*4));
2809 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2811 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
/*
 * he_proc_read() -- /proc interface: emit one line of driver status
 * per call, selected by *pos.
 *
 * Reports product id/media type, hardware error counters (accumulated
 * into static longs across calls), queue sizes and peaks, receive
 * buffer pool occupancy, and CBR rate-controller usage.
 *
 * NOTE(review): the position-dispatch lines (the "if (!left--)"
 * cascade in the original) are elided in this extract, so each
 * sprintf below corresponds to a different *pos value.
 */
2816 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2818 unsigned long flags;
2819 struct he_dev *he_dev = HE_DEV(dev);
2822 struct he_rbrq *rbrq_tail;
2823 struct he_tpdrq *tpdrq_head;
2824 int rbpl_head, rbpl_tail;
/* accumulated error counters; hardware registers are clear-on-read,
 * so totals persist across reads in these statics */
2826 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2831 return sprintf(page, "ATM he driver\n");
2834 return sprintf(page, "%s%s\n\n",
/* bit 0x40 of the media word distinguishes single- vs multi-mode fiber */
2835 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2838 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2840 spin_lock_irqsave(&he_dev->global_lock, flags);
2841 mcc += he_readl(he_dev, MCC);
2842 oec += he_readl(he_dev, OEC);
2843 dcc += he_readl(he_dev, DCC);
2844 cec += he_readl(he_dev, CEC);
2845 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2848 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2849 mcc, oec, dcc, cec);
2852 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2853 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2856 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2860 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2861 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2864 return sprintf(page, "tbrq_size = %d peak = %d\n",
2865 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
/* receive buffer pool occupancy = head - tail, normalized to a
 * non-negative entry count by adding one full ring before dividing */
2869 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2870 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2872 inuse = rbpl_head - rbpl_tail;
2874 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2875 inuse /= sizeof(struct he_rbp);
2878 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2879 CONFIG_RBPL_SIZE, inuse);
2883 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2885 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2887 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2888 he_dev->cs_stper[i].pcr,
2889 he_dev->cs_stper[i].inuse);
/* limit is link_rate * 10/9: reserves ~10%% headroom for non-CBR traffic
 * -- TODO confirm rationale against the HE reference manual */
2892 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2893 he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2898 /* eeprom routines -- see 4.7 */
/*
 * read_prom_byte() -- bit-bang one byte out of the serial EEPROM.
 *
 * Drives the EEPROM through the HOST_CNTL register: enables writes,
 * clocks out the READ opcode from readtab[], clocks out the 8-bit
 * address MSB-first (data presented on bit 9), then clocks in 8 data
 * bits from ID_DOUT, MSB-first.  Each edge is paced by
 * udelay(EEPROM_DELAY).
 *
 * NOTE(review): loop-closing braces, the declarations of i/j/byte_read,
 * and the final "return byte_read" are elided in this extract.
 */
2900 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2902 u32 val = 0, tmp_read = 0;
/* snapshot HOST_CNTL so the EEPROM bits can be OR'd in */
2906 val = readl(he_dev->membase + HOST_CNTL);
2909 /* Turn on write enable */
2911 he_writel(he_dev, val, HOST_CNTL);
2913 /* Send READ instruction */
2914 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2915 he_writel(he_dev, val | readtab[i], HOST_CNTL);
2916 udelay(EEPROM_DELAY);
2919 /* Next, we need to send the byte address to read from */
/* two writes per bit = one clock edge pair from clocktab[]; the
 * address bit rides on bit 9 of HOST_CNTL */
2920 for (i = 7; i >= 0; i--) {
2921 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2922 udelay(EEPROM_DELAY);
2923 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2924 udelay(EEPROM_DELAY);
2929 val &= 0xFFFFF7FF; /* Turn off write enable */
2930 he_writel(he_dev, val, HOST_CNTL);
2932 /* Now, we can read data from the EEPROM by clocking it in */
2933 for (i = 7; i >= 0; i--) {
2934 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2935 udelay(EEPROM_DELAY);
2936 tmp_read = he_readl(he_dev, HOST_CNTL);
/* accumulate the data-out bit into position i (MSB first) */
2937 byte_read |= (unsigned char)
2938 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2939 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2940 udelay(EEPROM_DELAY);
/* deselect the EEPROM when done */
2943 he_writel(he_dev, val | ID_CS, HOST_CNTL);
2944 udelay(EEPROM_DELAY);
/* Module metadata and load-time tunables (all read-only, perm 0). */
2949 MODULE_LICENSE("GPL");
2950 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2951 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2952 module_param(disable64, bool, 0);
2953 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2954 module_param(nvpibits, short, 0);
2955 MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
2956 module_param(nvcibits, short, 0);
2957 MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
2958 module_param(rx_skb_reserve, short, 0);
2959 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2960 module_param(irq_coalesce, bool, 0);
2961 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2962 module_param(sdh, bool, 0);
2963 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
/* PCI device match table: any FORE HE adapter, any subsystem ids.
 * NOTE(review): the table's terminating entry and closing brace are
 * elided in this extract. */
2965 static struct pci_device_id he_pci_tbl[] = {
2966 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
2971 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
/* PCI driver registration hooks (probe/remove defined elsewhere in
 * this file; the .name initializer is elided in this extract). */
2973 static struct pci_driver he_driver = {
2975 .probe = he_init_one,
2976 .remove = __devexit_p(he_remove_one),
2977 .id_table = he_pci_tbl,
/* Module init: register the PCI driver; probing happens via
 * he_init_one() as devices are matched. */
2980 static int __init he_init(void)
2982 return pci_register_driver(&he_driver);
/* Module exit: unregister the PCI driver, triggering he_remove_one()
 * for every bound device. */
2985 static void __exit he_cleanup(void)
2987 pci_unregister_driver(&he_driver);
/* wire the init/exit functions into the module load/unload path */
2990 module_init(he_init);
2991 module_exit(he_cleanup);