5 ForeRunnerHE ATM Adapter driver for ATM on Linux
6 Copyright (C) 1999-2001 Naval Research Laboratory
8 This library is free software; you can redistribute it and/or
9 modify it under the terms of the GNU Lesser General Public
10 License as published by the Free Software Foundation; either
11 version 2.1 of the License, or (at your option) any later version.
13 This library is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public
19 License along with this library; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 ForeRunnerHE ATM Adapter driver for ATM on Linux
29 Copyright (C) 1999-2001 Naval Research Laboratory
31 Permission to use, copy, modify and distribute this software and its
32 documentation is hereby granted, provided that both the copyright
33 notice and this permission notice appear in all copies of the software,
34 derivative works or modified versions, and any portions thereof, and
35 that both notices appear in supporting documentation.
37 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39 RESULTING FROM THE USE OF THIS SOFTWARE.
41 This driver was written using the "Programmer's Reference Manual for
42 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
45 chas williams <chas@cmf.nrl.navy.mil>
46 eric kinzie <ekinzie@cmf.nrl.navy.mil>
49 4096 supported 'connections'
50 group 0 is used for all traffic
51 interrupt queue 0 is used for all interrupts
52 aal0 support (based on work from ulrich.u.muller@nokia.com)
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/bitmap.h>
71 #include <linux/slab.h>
73 #include <asm/byteorder.h>
74 #include <asm/uaccess.h>
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
80 #undef USE_SCATTERGATHER
81 #undef USE_CHECKSUM_HW /* still confused about this */
86 #include <linux/atm_he.h>
88 #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
91 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
93 #define HPRINTK(fmt,args...) do { } while (0)
98 static int he_open(struct atm_vcc *vcc);
99 static void he_close(struct atm_vcc *vcc);
100 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102 static irqreturn_t he_irq_handler(int irq, void *dev_id);
103 static void he_tasklet(unsigned long data);
104 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105 static int he_start(struct atm_dev *dev);
106 static void he_stop(struct he_dev *dev);
107 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
110 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
114 static struct he_dev *he_devs;
115 static bool disable64;
116 static short nvpibits = -1;
117 static short nvcibits = -1;
118 static short rx_skb_reserve = 16;
119 static bool irq_coalesce = true;
122 /* Read from EEPROM = 0000 0011b */
123 static unsigned int readtab[] = {
138 CLK_HIGH | SI_HIGH, /* 1 */
140 CLK_HIGH | SI_HIGH /* 1 */
143 /* Clock to read from/write to the EEPROM */
144 static unsigned int clocktab[] = {
164 static struct atmdev_ops he_ops =
170 .phy_put = he_phy_put,
171 .phy_get = he_phy_get,
172 .proc_read = he_proc_read,
176 #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177 #define he_readl(dev, reg) readl((dev)->membase + (reg))
179 /* section 2.12 connection memory access */
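/* the TCM, RCM and mailbox spaces are reached indirectly: the helpers below
 * stage data in CON_DAT, kick CON_CTL with the target address and access
 * type, then poll CON_CTL_BUSY until the adapter finishes the access
 * (summary of the handshake in he_writel_internal()/he_readl_internal()).
 */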
181 static __inline__ void
182 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
185 he_writel(he_dev, val, CON_DAT);
186 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
187 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
191 #define he_writel_rcm(dev, val, reg) \
192 he_writel_internal(dev, val, reg, CON_CTL_RCM)
194 #define he_writel_tcm(dev, val, reg) \
195 he_writel_internal(dev, val, reg, CON_CTL_TCM)
197 #define he_writel_mbox(dev, val, reg) \
198 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
201 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
203 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205 return he_readl(he_dev, CON_DAT);
208 #define he_readl_rcm(dev, reg) \
209 he_readl_internal(dev, reg, CON_CTL_RCM)
211 #define he_readl_tcm(dev, reg) \
212 he_readl_internal(dev, reg, CON_CTL_TCM)
214 #define he_readl_mbox(dev, reg) \
215 he_readl_internal(dev, reg, CON_CTL_MBOX)
218 /* figure 2.2 connection id */
220 #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
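/* illustrative example: with vcibits = 10, vpi 1 / vci 32 maps to
 * cid (1 << 10) | 32 = 0x420; the 0x1fff mask only bounds the result to
 * the connection memory index range.
 */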
222 /* 2.5.1 per connection transmit state registers */
224 #define he_writel_tsr0(dev, val, cid) \
225 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226 #define he_readl_tsr0(dev, cid) \
227 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
229 #define he_writel_tsr1(dev, val, cid) \
230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
232 #define he_writel_tsr2(dev, val, cid) \
233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
235 #define he_writel_tsr3(dev, val, cid) \
236 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
238 #define he_writel_tsr4(dev, val, cid) \
239 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
243 * NOTE While the transmit connection is active, bits 23 through 0
244 * of this register must not be written by the host. Byte
245 * enables should be used during normal operation when writing
246 * the most significant byte.
249 #define he_writel_tsr4_upper(dev, val, cid) \
250 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
252 | CON_BYTE_DISABLE_2 \
253 | CON_BYTE_DISABLE_1 \
254 | CON_BYTE_DISABLE_0)
256 #define he_readl_tsr4(dev, cid) \
257 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
259 #define he_writel_tsr5(dev, val, cid) \
260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
262 #define he_writel_tsr6(dev, val, cid) \
263 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
265 #define he_writel_tsr7(dev, val, cid) \
266 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
269 #define he_writel_tsr8(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
272 #define he_writel_tsr9(dev, val, cid) \
273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
275 #define he_writel_tsr10(dev, val, cid) \
276 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
278 #define he_writel_tsr11(dev, val, cid) \
279 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
282 #define he_writel_tsr12(dev, val, cid) \
283 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
285 #define he_writel_tsr13(dev, val, cid) \
286 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
289 #define he_writel_tsr14(dev, val, cid) \
290 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
292 #define he_writel_tsr14_upper(dev, val, cid) \
293 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
295 | CON_BYTE_DISABLE_2 \
296 | CON_BYTE_DISABLE_1 \
297 | CON_BYTE_DISABLE_0)
299 /* 2.7.1 per connection receive state registers */
301 #define he_writel_rsr0(dev, val, cid) \
302 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303 #define he_readl_rsr0(dev, cid) \
304 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
306 #define he_writel_rsr1(dev, val, cid) \
307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
309 #define he_writel_rsr2(dev, val, cid) \
310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
312 #define he_writel_rsr3(dev, val, cid) \
313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
315 #define he_writel_rsr4(dev, val, cid) \
316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
318 #define he_writel_rsr5(dev, val, cid) \
319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
321 #define he_writel_rsr6(dev, val, cid) \
322 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
324 #define he_writel_rsr7(dev, val, cid) \
325 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
327 static __inline__ struct atm_vcc*
328 __find_vcc(struct he_dev *he_dev, unsigned cid)
330 struct hlist_head *head;
336 vpi = cid >> he_dev->vcibits;
337 vci = cid & ((1 << he_dev->vcibits) - 1);
338 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
340 sk_for_each(s, head) {
342 if (vcc->dev == he_dev->atm_dev &&
343 vcc->vci == vci && vcc->vpi == vpi &&
344 vcc->qos.rxtp.traffic_class != ATM_NONE) {
351 static int he_init_one(struct pci_dev *pci_dev,
352 const struct pci_device_id *pci_ent)
354 struct atm_dev *atm_dev = NULL;
355 struct he_dev *he_dev = NULL;
358 printk(KERN_INFO "ATM he driver\n");
360 if (pci_enable_device(pci_dev))
362 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
363 printk(KERN_WARNING "he: no suitable dma available\n");
365 goto init_one_failure;
368 atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
371 goto init_one_failure;
373 pci_set_drvdata(pci_dev, atm_dev);
375 he_dev = kzalloc(sizeof(struct he_dev),
379 goto init_one_failure;
381 he_dev->pci_dev = pci_dev;
382 he_dev->atm_dev = atm_dev;
383 he_dev->atm_dev->dev_data = he_dev;
384 atm_dev->dev_data = he_dev;
385 he_dev->number = atm_dev->number;
386 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387 spin_lock_init(&he_dev->global_lock);
389 if (he_start(atm_dev)) {
392 goto init_one_failure;
396 he_dev->next = he_devs;
402 atm_dev_deregister(atm_dev);
404 pci_disable_device(pci_dev);
408 static void he_remove_one(struct pci_dev *pci_dev)
410 struct atm_dev *atm_dev;
411 struct he_dev *he_dev;
413 atm_dev = pci_get_drvdata(pci_dev);
414 he_dev = HE_DEV(atm_dev);
416 /* need to remove from he_devs */
419 atm_dev_deregister(atm_dev);
422 pci_disable_device(pci_dev);
427 rate_to_atmf(unsigned rate) /* cps to atm forum format */
429 #define NONZERO (1 << 14)
437 while (rate > 0x3ff) {
442 return (NONZERO | (exp << 9) | (rate & 0x1ff));
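/* the value built above is NZ | exponent << 9 | mantissa, which decodes as
 * 2^exp * (512 + man) / 512 cps (see he_init_cs_block_rcm).  illustrative
 * example: 1000 cps encodes to exp = 9, man = 488 and decodes back to
 * exactly 512 * (512 + 488) / 512 = 1000 cps.
 */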
445 static void he_init_rx_lbfp0(struct he_dev *he_dev)
447 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
448 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
449 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
450 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
453 lbm_offset = he_readl(he_dev, RCMLBM_BA);
455 he_writel(he_dev, lbufd_index, RLBF0_H);
457 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
459 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
461 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
462 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
464 if (++lbuf_count == lbufs_per_row) {
466 row_offset += he_dev->bytes_per_row;
471 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
472 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
475 static void he_init_rx_lbfp1(struct he_dev *he_dev)
477 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
478 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
479 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
480 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
483 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
485 he_writel(he_dev, lbufd_index, RLBF1_H);
487 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
489 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
491 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
492 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
494 if (++lbuf_count == lbufs_per_row) {
496 row_offset += he_dev->bytes_per_row;
501 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
502 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
505 static void he_init_tx_lbfp(struct he_dev *he_dev)
507 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
508 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
509 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
510 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
512 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
513 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
515 he_writel(he_dev, lbufd_index, TLBF_H);
517 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
519 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
521 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
522 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
524 if (++lbuf_count == lbufs_per_row) {
526 row_offset += he_dev->bytes_per_row;
531 he_writel(he_dev, lbufd_index - 1, TLBF_T);
534 static int he_init_tpdrq(struct he_dev *he_dev)
536 he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
537 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
538 &he_dev->tpdrq_phys, GFP_KERNEL);
539 if (he_dev->tpdrq_base == NULL) {
540 hprintk("failed to alloc tpdrq\n");
544 he_dev->tpdrq_tail = he_dev->tpdrq_base;
545 he_dev->tpdrq_head = he_dev->tpdrq_base;
547 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
548 he_writel(he_dev, 0, TPDRQ_T);
549 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
554 static void he_init_cs_block(struct he_dev *he_dev)
556 unsigned clock, rate, delta;
559 /* 5.1.7 cs block initialization */
561 for (reg = 0; reg < 0x20; ++reg)
562 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
564 /* rate grid timer reload values */
566 clock = he_is622(he_dev) ? 66667000 : 50000000;
567 rate = he_dev->atm_dev->link_rate;
568 delta = rate / 16 / 2;
570 for (reg = 0; reg < 0x10; ++reg) {
571 /* 2.4 internal transmit function
573 * we initialize the first row in the rate grid.
574 * values are the period (in clock cycles) of the timer
576 unsigned period = clock / rate;
578 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
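/* illustrative numbers: on an OC-3 card (50 MHz clock, ATM_OC3_PCR =
 * 353207 cps) the first entry works out to 50000000 / 353207 = 141
 * clock cycles */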
582 if (he_is622(he_dev)) {
583 /* table 5.2 (4 cells per lbuf) */
584 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
585 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
586 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
587 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
588 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
590 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
591 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
592 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
593 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
594 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
595 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
596 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
598 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
601 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
602 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
603 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
604 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
605 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
606 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
609 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
610 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
612 /* table 5.1 (4 cells per lbuf) */
613 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
614 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
615 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
616 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
617 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
619 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
620 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
621 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
622 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
623 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
624 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
625 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
627 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
630 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
631 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
632 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
633 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
634 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
635 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
638 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
639 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
642 he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
644 for (reg = 0; reg < 0x8; ++reg)
645 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
649 static int he_init_cs_block_rcm(struct he_dev *he_dev)
651 unsigned (*rategrid)[16][16];
652 unsigned rate, delta;
655 unsigned rate_atmf, exp, man;
656 unsigned long long rate_cps;
657 int mult, buf, buf_limit = 4;
659 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
663 /* initialize rate grid group table */
665 for (reg = 0x0; reg < 0xff; ++reg)
666 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
668 /* initialize rate controller groups */
670 for (reg = 0x100; reg < 0x1ff; ++reg)
671 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
673 /* initialize tNrm lookup table */
675 /* the manual makes reference to a routine in a sample driver
676 for proper configuration; fortunately, we only need this
677 in order to support abr connections */
679 /* initialize rate to group table */
681 rate = he_dev->atm_dev->link_rate;
685 * 2.4 transmit internal functions
687 * we construct a copy of the rate grid used by the scheduler
688 * in order to construct the rate to group table below
691 for (j = 0; j < 16; j++) {
692 (*rategrid)[0][j] = rate;
696 for (i = 1; i < 16; i++)
697 for (j = 0; j < 16; j++)
699 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
701 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
704 * 2.4 transmit internal function
706 * this table maps the 5-bit exponent and the upper 5 bits of the mantissa
707 * of the atm forum representation of the rate into an index
712 while (rate_atmf < 0x400) {
713 man = (rate_atmf & 0x1f) << 4;
714 exp = rate_atmf >> 5;
717 instead of '/ 512', use '>> 9' to prevent a call
718 to divdu3 on x86 platforms
720 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
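/* worked example: rate_atmf = 0x200 gives exp = 16, man = 0, so
 * rate_cps = (1 << 16) * 512 >> 9 = 65536 cps */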
723 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
725 for (i = 255; i > 0; i--)
726 if ((*rategrid)[i/16][i%16] >= rate_cps)
727 break; /* pick nearest rate instead? */
730 * each table entry is 16 bits: (rate grid index (8 bits)
731 * and a buffer limit (8 bits)
732 * there are two table entries in each 32-bit register
736 buf = rate_cps * he_dev->tx_numbuffs /
737 (he_dev->atm_dev->link_rate * 2);
739 /* this is pretty, but avoids _divdu3 and is mostly correct */
740 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
741 if (rate_cps > (272 * mult))
743 else if (rate_cps > (204 * mult))
745 else if (rate_cps > (136 * mult))
747 else if (rate_cps > (68 * mult))
754 reg = (reg << 16) | ((i << 8) | buf);
756 #define RTGTBL_OFFSET 0x400
759 he_writel_rcm(he_dev, reg,
760 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
769 static int he_init_group(struct he_dev *he_dev, int group)
771 struct he_buff *heb, *next;
775 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
776 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
777 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
778 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
779 G0_RBPS_BS + (group * 32));
782 he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
783 * sizeof(unsigned long), GFP_KERNEL);
784 if (!he_dev->rbpl_table) {
785 hprintk("unable to allocate rbpl bitmap table\n");
788 bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
790 /* rbpl_virt 64-bit pointers */
791 he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
792 * sizeof(struct he_buff *), GFP_KERNEL);
793 if (!he_dev->rbpl_virt) {
794 hprintk("unable to allocate rbpl virt table\n");
795 goto out_free_rbpl_table;
798 /* large buffer pool */
799 he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
800 CONFIG_RBPL_BUFSIZE, 64, 0);
801 if (he_dev->rbpl_pool == NULL) {
802 hprintk("unable to create rbpl pool\n");
803 goto out_free_rbpl_virt;
806 he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
807 CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
808 &he_dev->rbpl_phys, GFP_KERNEL);
809 if (he_dev->rbpl_base == NULL) {
810 hprintk("failed to alloc rbpl_base\n");
811 goto out_destroy_rbpl_pool;
814 INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
816 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
818 heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
821 heb->mapping = mapping;
822 list_add(&heb->entry, &he_dev->rbpl_outstanding);
824 set_bit(i, he_dev->rbpl_table);
825 he_dev->rbpl_virt[i] = heb;
826 he_dev->rbpl_hint = i + 1;
827 he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
828 he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
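/* the idx stored above is echoed back by the adapter in RBRQ_ADDR(), which
 * is how he_service_rbrq() recovers this he_buff through rbpl_virt[] */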
830 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
832 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
833 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
834 G0_RBPL_T + (group * 32));
835 he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
836 G0_RBPL_BS + (group * 32));
838 RBP_THRESH(CONFIG_RBPL_THRESH) |
839 RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
841 G0_RBPL_QI + (group * 32));
843 /* rx buffer ready queue */
845 he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
846 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
847 &he_dev->rbrq_phys, GFP_KERNEL);
848 if (he_dev->rbrq_base == NULL) {
849 hprintk("failed to allocate rbrq\n");
853 he_dev->rbrq_head = he_dev->rbrq_base;
854 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
855 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
857 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
858 G0_RBRQ_Q + (group * 16));
860 hprintk("coalescing interrupts\n");
861 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
862 G0_RBRQ_I + (group * 16));
864 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
865 G0_RBRQ_I + (group * 16));
867 /* tx buffer ready queue */
869 he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
870 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
871 &he_dev->tbrq_phys, GFP_KERNEL);
872 if (he_dev->tbrq_base == NULL) {
873 hprintk("failed to allocate tbrq\n");
874 goto out_free_rbpq_base;
877 he_dev->tbrq_head = he_dev->tbrq_base;
879 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
880 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
881 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
882 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
887 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
888 sizeof(struct he_rbrq), he_dev->rbrq_base,
891 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
892 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
894 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
895 sizeof(struct he_rbp), he_dev->rbpl_base,
897 out_destroy_rbpl_pool:
898 dma_pool_destroy(he_dev->rbpl_pool);
900 kfree(he_dev->rbpl_virt);
902 kfree(he_dev->rbpl_table);
907 static int he_init_irq(struct he_dev *he_dev)
911 /* 2.9.3.5 tail offset for each interrupt queue is located after the
912 end of the interrupt queue */
914 he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
915 (CONFIG_IRQ_SIZE + 1)
916 * sizeof(struct he_irq),
919 if (he_dev->irq_base == NULL) {
920 hprintk("failed to allocate irq\n");
923 he_dev->irq_tailoffset = (unsigned *)
924 &he_dev->irq_base[CONFIG_IRQ_SIZE];
925 *he_dev->irq_tailoffset = 0;
926 he_dev->irq_head = he_dev->irq_base;
927 he_dev->irq_tail = he_dev->irq_base;
929 for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
930 he_dev->irq_base[i].isw = ITYPE_INVALID;
932 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
934 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
936 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
937 he_writel(he_dev, 0x0, IRQ0_DATA);
939 he_writel(he_dev, 0x0, IRQ1_BASE);
940 he_writel(he_dev, 0x0, IRQ1_HEAD);
941 he_writel(he_dev, 0x0, IRQ1_CNTL);
942 he_writel(he_dev, 0x0, IRQ1_DATA);
944 he_writel(he_dev, 0x0, IRQ2_BASE);
945 he_writel(he_dev, 0x0, IRQ2_HEAD);
946 he_writel(he_dev, 0x0, IRQ2_CNTL);
947 he_writel(he_dev, 0x0, IRQ2_DATA);
949 he_writel(he_dev, 0x0, IRQ3_BASE);
950 he_writel(he_dev, 0x0, IRQ3_HEAD);
951 he_writel(he_dev, 0x0, IRQ3_CNTL);
952 he_writel(he_dev, 0x0, IRQ3_DATA);
954 /* 2.9.3.2 interrupt queue mapping registers */
956 he_writel(he_dev, 0x0, GRP_10_MAP);
957 he_writel(he_dev, 0x0, GRP_32_MAP);
958 he_writel(he_dev, 0x0, GRP_54_MAP);
959 he_writel(he_dev, 0x0, GRP_76_MAP);
961 if (request_irq(he_dev->pci_dev->irq,
962 he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
963 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
967 he_dev->irq = he_dev->pci_dev->irq;
972 static int he_start(struct atm_dev *dev)
974 struct he_dev *he_dev;
975 struct pci_dev *pci_dev;
976 unsigned long membase;
979 u32 gen_cntl_0, host_cntl, lb_swap;
980 u8 cache_size, timer;
983 unsigned int status, reg;
986 he_dev = HE_DEV(dev);
987 pci_dev = he_dev->pci_dev;
989 membase = pci_resource_start(pci_dev, 0);
990 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
993 * pci bus controller initialization
996 /* 4.3 pci bus controller-specific initialization */
997 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
998 hprintk("can't read GEN_CNTL_0\n");
1001 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1002 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1003 hprintk("can't write GEN_CNTL_0.\n");
1007 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1008 hprintk("can't read PCI_COMMAND.\n");
1012 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1013 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1014 hprintk("can't enable memory.\n");
1018 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1019 hprintk("can't read cache line size?\n");
1023 if (cache_size < 16) {
1025 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1026 hprintk("can't set cache line size to %d\n", cache_size);
1029 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1030 hprintk("can't read latency timer?\n");
1036 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1038 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1039 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1042 #define LAT_TIMER 209
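/* presumably 1 + 16 (AVG_LAT) + 192 (BURST_SIZE/BUS_SIZE) = 209 */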
1043 if (timer < LAT_TIMER) {
1044 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1046 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1047 hprintk("can't set latency timer to %d\n", timer);
1050 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1051 hprintk("can't set up page mapping\n");
1055 /* 4.4 card reset */
1056 he_writel(he_dev, 0x0, RESET_CNTL);
1057 he_writel(he_dev, 0xff, RESET_CNTL);
1059 msleep(16); /* 16 ms */
1060 status = he_readl(he_dev, RESET_CNTL);
1061 if ((status & BOARD_RST_STATUS) == 0) {
1062 hprintk("reset failed\n");
1066 /* 4.5 set bus width */
1067 host_cntl = he_readl(he_dev, HOST_CNTL);
1068 if (host_cntl & PCI_BUS_SIZE64)
1069 gen_cntl_0 |= ENBL_64;
1071 gen_cntl_0 &= ~ENBL_64;
1073 if (disable64 == 1) {
1074 hprintk("disabling 64-bit pci bus transfers\n");
1075 gen_cntl_0 &= ~ENBL_64;
1078 if (gen_cntl_0 & ENBL_64)
1079 hprintk("64-bit transfers enabled\n");
1081 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1083 /* 4.7 read prom contents */
1084 for (i = 0; i < PROD_ID_LEN; ++i)
1085 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1087 he_dev->media = read_prom_byte(he_dev, MEDIA);
1089 for (i = 0; i < 6; ++i)
1090 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1092 hprintk("%s%s, %pM\n", he_dev->prod_id,
1093 he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1094 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1095 ATM_OC12_PCR : ATM_OC3_PCR;
1097 /* 4.6 set host endianness */
1098 lb_swap = he_readl(he_dev, LB_SWAP);
1099 if (he_is622(he_dev))
1100 lb_swap &= ~XFER_SIZE; /* 4 cells */
1102 lb_swap |= XFER_SIZE; /* 8 cells */
1104 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1106 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1107 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1108 #endif /* __BIG_ENDIAN */
1109 he_writel(he_dev, lb_swap, LB_SWAP);
1111 /* 4.8 sdram controller initialization */
1112 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1114 /* 4.9 initialize rnum value */
1115 lb_swap |= SWAP_RNUM_MAX(0xf);
1116 he_writel(he_dev, lb_swap, LB_SWAP);
1118 /* 4.10 initialize the interrupt queues */
1119 if ((err = he_init_irq(he_dev)) != 0)
1122 /* 4.11 enable pci bus controller state machines */
1123 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1124 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1125 he_writel(he_dev, host_cntl, HOST_CNTL);
1127 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1128 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1131 * atm network controller initialization
1134 /* 5.1.1 generic configuration state */
1137 * local (cell) buffer memory map
 *
 *    155 Mbps card (1024-byte rows)         622 Mbps card (2048-byte rows)
 *
 *    rows    0 ..    5   utility            rows    0 ..  255   rx0
 *    rows    6 ..  517   rx0                rows  256 ..  767   tx
 *    rows  518 .. 1535   tx                 rows  768 .. 1023   rx1
 *    rows 1536 .. 2047   rx1                (plus a utility column at the
 *                                            right edge of each row)
1164 /* total 4096 connections */
1165 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1166 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1168 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1169 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1173 if (nvpibits != -1) {
1174 he_dev->vpibits = nvpibits;
1175 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1178 if (nvcibits != -1) {
1179 he_dev->vcibits = nvcibits;
1180 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1184 if (he_is622(he_dev)) {
1185 he_dev->cells_per_row = 40;
1186 he_dev->bytes_per_row = 2048;
1187 he_dev->r0_numrows = 256;
1188 he_dev->tx_numrows = 512;
1189 he_dev->r1_numrows = 256;
1190 he_dev->r0_startrow = 0;
1191 he_dev->tx_startrow = 256;
1192 he_dev->r1_startrow = 768;
1194 he_dev->cells_per_row = 20;
1195 he_dev->bytes_per_row = 1024;
1196 he_dev->r0_numrows = 512;
1197 he_dev->tx_numrows = 1018;
1198 he_dev->r1_numrows = 512;
1199 he_dev->r0_startrow = 6;
1200 he_dev->tx_startrow = 518;
1201 he_dev->r1_startrow = 1536;
1204 he_dev->cells_per_lbuf = 4;
1205 he_dev->buffer_limit = 4;
1206 he_dev->r0_numbuffs = he_dev->r0_numrows *
1207 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1208 if (he_dev->r0_numbuffs > 2560)
1209 he_dev->r0_numbuffs = 2560;
1211 he_dev->r1_numbuffs = he_dev->r1_numrows *
1212 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1213 if (he_dev->r1_numbuffs > 2560)
1214 he_dev->r1_numbuffs = 2560;
1216 he_dev->tx_numbuffs = he_dev->tx_numrows *
1217 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1218 if (he_dev->tx_numbuffs > 5120)
1219 he_dev->tx_numbuffs = 5120;
1221 /* 5.1.2 configure hardware dependent registers */
1224 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1225 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1226 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1227 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1230 he_writel(he_dev, BANK_ON |
1231 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1235 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1236 RM_RW_WAIT(1), RCMCONFIG);
1238 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1239 TM_RW_WAIT(1), TCMCONFIG);
1241 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1244 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1245 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1246 RX_VALVP(he_dev->vpibits) |
1247 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1249 he_writel(he_dev, DRF_THRESH(0x20) |
1250 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1251 TX_VCI_MASK(he_dev->vcibits) |
1252 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1254 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1256 he_writel(he_dev, PHY_INT_ENB |
1257 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1260 /* 5.1.3 initialize connection memory */
1262 for (i = 0; i < TCM_MEM_SIZE; ++i)
1263 he_writel_tcm(he_dev, 0, i);
1265 for (i = 0; i < RCM_MEM_SIZE; ++i)
1266 he_writel_rcm(he_dev, 0, i);
1269 * transmit connection memory map
 *
 *      0x00000 .. 0x07fff   TSRa
 *      0x08000 .. 0x0bfff   TSRb
 *      0x0c000 .. 0x0dfff   TSRc
 *      0x0e000 .. 0x0efff   TSRd
 *      0x0f000 .. 0x0ffff   TMABR
 *      0x10000 .. 0x1ffff   TPD
1299 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1300 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1301 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1302 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1303 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1307 * receive connection memory map
 *
 *      0x0000 .. 0x7fff   RSRa (per-connection receive state)
 *      0x8000 .. 0xcfff   LBM -- link lists of local buffer memory
 *                               (rx0/rx1 and tx free pools)
 *      0xd000 .. 0xdfff   ABR parameters (RCMABR_BA points here)
 *      0xe000 .. 0xffff   RSRb
1333 he_writel(he_dev, 0x08000, RCMLBM_BA);
1334 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1335 he_writel(he_dev, 0x0d800, RCMABR_BA);
1337 /* 5.1.4 initialize local buffer free pools linked lists */
1339 he_init_rx_lbfp0(he_dev);
1340 he_init_rx_lbfp1(he_dev);
1342 he_writel(he_dev, 0x0, RLBC_H);
1343 he_writel(he_dev, 0x0, RLBC_T);
1344 he_writel(he_dev, 0x0, RLBC_H2);
1346 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1347 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1349 he_init_tx_lbfp(he_dev);
1351 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1353 /* 5.1.5 initialize intermediate receive queues */
1355 if (he_is622(he_dev)) {
1356 he_writel(he_dev, 0x000f, G0_INMQ_S);
1357 he_writel(he_dev, 0x200f, G0_INMQ_L);
1359 he_writel(he_dev, 0x001f, G1_INMQ_S);
1360 he_writel(he_dev, 0x201f, G1_INMQ_L);
1362 he_writel(he_dev, 0x002f, G2_INMQ_S);
1363 he_writel(he_dev, 0x202f, G2_INMQ_L);
1365 he_writel(he_dev, 0x003f, G3_INMQ_S);
1366 he_writel(he_dev, 0x203f, G3_INMQ_L);
1368 he_writel(he_dev, 0x004f, G4_INMQ_S);
1369 he_writel(he_dev, 0x204f, G4_INMQ_L);
1371 he_writel(he_dev, 0x005f, G5_INMQ_S);
1372 he_writel(he_dev, 0x205f, G5_INMQ_L);
1374 he_writel(he_dev, 0x006f, G6_INMQ_S);
1375 he_writel(he_dev, 0x206f, G6_INMQ_L);
1377 he_writel(he_dev, 0x007f, G7_INMQ_S);
1378 he_writel(he_dev, 0x207f, G7_INMQ_L);
1380 he_writel(he_dev, 0x0000, G0_INMQ_S);
1381 he_writel(he_dev, 0x0008, G0_INMQ_L);
1383 he_writel(he_dev, 0x0001, G1_INMQ_S);
1384 he_writel(he_dev, 0x0009, G1_INMQ_L);
1386 he_writel(he_dev, 0x0002, G2_INMQ_S);
1387 he_writel(he_dev, 0x000a, G2_INMQ_L);
1389 he_writel(he_dev, 0x0003, G3_INMQ_S);
1390 he_writel(he_dev, 0x000b, G3_INMQ_L);
1392 he_writel(he_dev, 0x0004, G4_INMQ_S);
1393 he_writel(he_dev, 0x000c, G4_INMQ_L);
1395 he_writel(he_dev, 0x0005, G5_INMQ_S);
1396 he_writel(he_dev, 0x000d, G5_INMQ_L);
1398 he_writel(he_dev, 0x0006, G6_INMQ_S);
1399 he_writel(he_dev, 0x000e, G6_INMQ_L);
1401 he_writel(he_dev, 0x0007, G7_INMQ_S);
1402 he_writel(he_dev, 0x000f, G7_INMQ_L);
1405 /* 5.1.6 application tunable parameters */
1407 he_writel(he_dev, 0x0, MCC);
1408 he_writel(he_dev, 0x0, OEC);
1409 he_writel(he_dev, 0x0, DCC);
1410 he_writel(he_dev, 0x0, CEC);
1412 /* 5.1.7 cs block initialization */
1414 he_init_cs_block(he_dev);
1416 /* 5.1.8 cs block connection memory initialization */
1418 if (he_init_cs_block_rcm(he_dev) < 0)
1421 /* 5.1.10 initialize host structures */
1423 he_init_tpdrq(he_dev);
1425 he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
1426 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1427 if (he_dev->tpd_pool == NULL) {
1428 hprintk("unable to create tpd dma_pool\n");
1432 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1434 if (he_init_group(he_dev, 0) != 0)
1437 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1438 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1439 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1440 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1441 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1442 G0_RBPS_BS + (group * 32));
1444 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1445 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1446 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1447 G0_RBPL_QI + (group * 32));
1448 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1450 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1451 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1452 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1453 G0_RBRQ_Q + (group * 16));
1454 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1456 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1457 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1458 he_writel(he_dev, TBRQ_THRESH(0x1),
1459 G0_TBRQ_THRESH + (group * 16));
1460 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1463 /* host status page */
1465 he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
1466 sizeof(struct he_hsp),
1467 &he_dev->hsp_phys, GFP_KERNEL);
1468 if (he_dev->hsp == NULL) {
1469 hprintk("failed to allocate host status page\n");
1472 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1474 /* initialize framer */
1476 #ifdef CONFIG_ATM_HE_USE_SUNI
1477 if (he_isMM(he_dev))
1478 suni_init(he_dev->atm_dev);
1479 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1480 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1481 #endif /* CONFIG_ATM_HE_USE_SUNI */
1484 /* this really should be in suni.c but for now... */
1487 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1488 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1489 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1490 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1493 /* 5.1.12 enable transmit and receive */
1495 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1496 reg |= TX_ENABLE|ER_ENABLE;
1497 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1499 reg = he_readl(he_dev, RC_CONFIG);
1501 he_writel(he_dev, reg, RC_CONFIG);
1503 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1504 he_dev->cs_stper[i].inuse = 0;
1505 he_dev->cs_stper[i].pcr = -1;
1507 he_dev->total_bw = 0;
1510 /* atm linux initialization */
1512 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1513 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1515 he_dev->irq_peak = 0;
1516 he_dev->rbrq_peak = 0;
1517 he_dev->rbpl_peak = 0;
1518 he_dev->tbrq_peak = 0;
1520 HPRINTK("hell bent for leather!\n");
1526 he_stop(struct he_dev *he_dev)
1528 struct he_buff *heb, *next;
1529 struct pci_dev *pci_dev;
1530 u32 gen_cntl_0, reg;
1533 pci_dev = he_dev->pci_dev;
1535 /* disable interrupts */
1537 if (he_dev->membase) {
1538 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1539 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1540 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1542 tasklet_disable(&he_dev->tasklet);
1544 /* disable recv and transmit */
1546 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1547 reg &= ~(TX_ENABLE|ER_ENABLE);
1548 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1550 reg = he_readl(he_dev, RC_CONFIG);
1551 reg &= ~(RX_ENABLE);
1552 he_writel(he_dev, reg, RC_CONFIG);
1555 #ifdef CONFIG_ATM_HE_USE_SUNI
1556 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1557 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1558 #endif /* CONFIG_ATM_HE_USE_SUNI */
1561 free_irq(he_dev->irq, he_dev);
1563 if (he_dev->irq_base)
1564 dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
1565 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1568 dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
1569 he_dev->hsp, he_dev->hsp_phys);
1571 if (he_dev->rbpl_base) {
1572 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1573 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1575 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
1576 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1579 kfree(he_dev->rbpl_virt);
1580 kfree(he_dev->rbpl_table);
1582 if (he_dev->rbpl_pool)
1583 dma_pool_destroy(he_dev->rbpl_pool);
1585 if (he_dev->rbrq_base)
1586 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1587 he_dev->rbrq_base, he_dev->rbrq_phys);
1589 if (he_dev->tbrq_base)
1590 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1591 he_dev->tbrq_base, he_dev->tbrq_phys);
1593 if (he_dev->tpdrq_base)
1594 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1595 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1597 if (he_dev->tpd_pool)
1598 dma_pool_destroy(he_dev->tpd_pool);
1600 if (he_dev->pci_dev) {
1601 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1602 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1603 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1606 if (he_dev->membase)
1607 iounmap(he_dev->membase);
1610 static struct he_tpd *
1611 __alloc_tpd(struct he_dev *he_dev)
1616 tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
1620 tpd->status = TPD_ADDR(mapping);
1622 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1623 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1624 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1629 #define AAL5_LEN(buf,len) \
1630 ((((unsigned char *)(buf))[(len)-6] << 8) | \
1631 (((unsigned char *)(buf))[(len)-5]))
1635 * aal5 packets can optionally return the tcp checksum in the lower
1636 * 16 bits of the crc (RSR0_TCP_CKSUM)
1639 #define TCP_CKSUM(buf,len) \
1640 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1641 (((unsigned char *)(buf))[(len-1)]))
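/* both macros read the standard aal5 trailer, which occupies the last 8
 * bytes of the cpcs-pdu: UU(1), CPI(1), length(2) -- hence bytes len-6 and
 * len-5 above -- followed by the 4-byte crc whose low 16 bits may carry
 * the checksum described above */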
1644 he_service_rbrq(struct he_dev *he_dev, int group)
1646 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1647 ((unsigned long)he_dev->rbrq_base |
1648 he_dev->hsp->group[group].rbrq_tail);
1649 unsigned cid, lastcid = -1;
1650 struct sk_buff *skb;
1651 struct atm_vcc *vcc = NULL;
1652 struct he_vcc *he_vcc;
1653 struct he_buff *heb, *next;
1655 int pdus_assembled = 0;
1658 read_lock(&vcc_sklist_lock);
1659 while (he_dev->rbrq_head != rbrq_tail) {
1662 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1663 he_dev->rbrq_head, group,
1664 RBRQ_ADDR(he_dev->rbrq_head),
1665 RBRQ_BUFLEN(he_dev->rbrq_head),
1666 RBRQ_CID(he_dev->rbrq_head),
1667 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1668 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1669 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1670 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1671 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1672 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1674 i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1675 heb = he_dev->rbpl_virt[i];
1677 cid = RBRQ_CID(he_dev->rbrq_head);
1679 vcc = __find_vcc(he_dev, cid);
1682 if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1683 hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
1684 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1685 clear_bit(i, he_dev->rbpl_table);
1686 list_del(&heb->entry);
1687 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1690 goto next_rbrq_entry;
1693 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1694 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1695 atomic_inc(&vcc->stats->rx_drop);
1696 goto return_host_buffers;
1699 heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1700 clear_bit(i, he_dev->rbpl_table);
1701 list_move_tail(&heb->entry, &he_vcc->buffers);
1702 he_vcc->pdu_len += heb->len;
1704 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1706 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1707 wake_up(&he_vcc->rx_waitq);
1708 goto return_host_buffers;
1711 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1712 goto next_rbrq_entry;
1714 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1715 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1716 HPRINTK("%s%s (%d.%d)\n",
1717 RBRQ_CRC_ERR(he_dev->rbrq_head)
1719 RBRQ_LEN_ERR(he_dev->rbrq_head)
1721 vcc->vpi, vcc->vci);
1722 atomic_inc(&vcc->stats->rx_err);
1723 goto return_host_buffers;
1726 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1729 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1730 goto return_host_buffers;
1733 if (rx_skb_reserve > 0)
1734 skb_reserve(skb, rx_skb_reserve);
1736 __net_timestamp(skb);
1738 list_for_each_entry(heb, &he_vcc->buffers, entry)
1739 memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1741 switch (vcc->qos.aal) {
1743 /* 2.10.1.5 raw cell receive */
1744 skb->len = ATM_AAL0_SDU;
1745 skb_set_tail_pointer(skb, skb->len);
1748 /* 2.10.1.2 aal5 receive */
1750 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1751 skb_set_tail_pointer(skb, skb->len);
1752 #ifdef USE_CHECKSUM_HW
1753 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1754 skb->ip_summed = CHECKSUM_COMPLETE;
1755 skb->csum = TCP_CKSUM(skb->data,
1762 #ifdef should_never_happen
1763 if (skb->len > vcc->qos.rxtp.max_sdu)
1764 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1768 ATM_SKB(skb)->vcc = vcc;
1770 spin_unlock(&he_dev->global_lock);
1771 vcc->push(vcc, skb);
1772 spin_lock(&he_dev->global_lock);
1774 atomic_inc(&vcc->stats->rx);
1776 return_host_buffers:
1779 list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1780 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1781 INIT_LIST_HEAD(&he_vcc->buffers);
1782 he_vcc->pdu_len = 0;
1785 he_dev->rbrq_head = (struct he_rbrq *)
1786 ((unsigned long) he_dev->rbrq_base |
1787 RBRQ_MASK(he_dev->rbrq_head + 1));
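/* the ready queues are power-of-two rings, so OR-ing the masked byte
 * offset of (head + 1) onto the base pointer wraps the ring without a
 * modulo; this relies on the base being aligned to the ring size
 * (the same idiom is used for the tbrq and tpdrq) */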
1790 read_unlock(&vcc_sklist_lock);
1793 if (updated > he_dev->rbrq_peak)
1794 he_dev->rbrq_peak = updated;
1796 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1797 G0_RBRQ_H + (group * 16));
1800 return pdus_assembled;
1804 he_service_tbrq(struct he_dev *he_dev, int group)
1806 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1807 ((unsigned long)he_dev->tbrq_base |
1808 he_dev->hsp->group[group].tbrq_tail);
1810 int slot, updated = 0;
1811 struct he_tpd *__tpd;
1813 /* 2.1.6 transmit buffer return queue */
1815 while (he_dev->tbrq_head != tbrq_tail) {
1818 HPRINTK("tbrq%d 0x%x%s%s\n",
1820 TBRQ_TPD(he_dev->tbrq_head),
1821 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1822 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1824 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1825 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1827 list_del(&__tpd->entry);
1833 hprintk("unable to locate tpd for dma buffer %x\n",
1834 TBRQ_TPD(he_dev->tbrq_head));
1835 goto next_tbrq_entry;
1838 if (TBRQ_EOS(he_dev->tbrq_head)) {
1839 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1840 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1842 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1844 goto next_tbrq_entry;
1847 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1848 if (tpd->iovec[slot].addr)
1849 dma_unmap_single(&he_dev->pci_dev->dev,
1850 tpd->iovec[slot].addr,
1851 tpd->iovec[slot].len & TPD_LEN_MASK,
1853 if (tpd->iovec[slot].len & TPD_LST)
1858 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1859 if (tpd->vcc && tpd->vcc->pop)
1860 tpd->vcc->pop(tpd->vcc, tpd->skb);
1862 dev_kfree_skb_any(tpd->skb);
1867 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1868 he_dev->tbrq_head = (struct he_tbrq *)
1869 ((unsigned long) he_dev->tbrq_base |
1870 TBRQ_MASK(he_dev->tbrq_head + 1));
1874 if (updated > he_dev->tbrq_peak)
1875 he_dev->tbrq_peak = updated;
1877 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1878 G0_TBRQ_H + (group * 16));
1883 he_service_rbpl(struct he_dev *he_dev, int group)
1885 struct he_rbp *new_tail;
1886 struct he_rbp *rbpl_head;
1887 struct he_buff *heb;
1892 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1893 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1896 new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1897 RBPL_MASK(he_dev->rbpl_tail+1));
1899 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1900 if (new_tail == rbpl_head)
1903 i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1904 if (i > (RBPL_TABLE_SIZE - 1)) {
1905 i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1906 if (i > (RBPL_TABLE_SIZE - 1))
1909 he_dev->rbpl_hint = i + 1;
1911 heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
1914 heb->mapping = mapping;
1915 list_add(&heb->entry, &he_dev->rbpl_outstanding);
1916 he_dev->rbpl_virt[i] = heb;
1917 set_bit(i, he_dev->rbpl_table);
1918 new_tail->idx = i << RBP_IDX_OFFSET;
1919 new_tail->phys = mapping + offsetof(struct he_buff, data);
1921 he_dev->rbpl_tail = new_tail;
1926 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1930 he_tasklet(unsigned long data)
1932 unsigned long flags;
1933 struct he_dev *he_dev = (struct he_dev *) data;
1937 HPRINTK("tasklet (0x%lx)\n", data);
1938 spin_lock_irqsave(&he_dev->global_lock, flags);
1940 while (he_dev->irq_head != he_dev->irq_tail) {
1943 type = ITYPE_TYPE(he_dev->irq_head->isw);
1944 group = ITYPE_GROUP(he_dev->irq_head->isw);
1947 case ITYPE_RBRQ_THRESH:
1948 HPRINTK("rbrq%d threshold\n", group);
1950 case ITYPE_RBRQ_TIMER:
1951 if (he_service_rbrq(he_dev, group))
1952 he_service_rbpl(he_dev, group);
1954 case ITYPE_TBRQ_THRESH:
1955 HPRINTK("tbrq%d threshold\n", group);
1957 case ITYPE_TPD_COMPLETE:
1958 he_service_tbrq(he_dev, group);
1960 case ITYPE_RBPL_THRESH:
1961 he_service_rbpl(he_dev, group);
1963 case ITYPE_RBPS_THRESH:
1964 /* shouldn't happen unless small buffers enabled */
1967 HPRINTK("phy interrupt\n");
1968 #ifdef CONFIG_ATM_HE_USE_SUNI
1969 spin_unlock_irqrestore(&he_dev->global_lock, flags);
1970 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1971 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1972 spin_lock_irqsave(&he_dev->global_lock, flags);
1976 switch (type|group) {
1978 hprintk("parity error\n");
1981 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1985 case ITYPE_TYPE(ITYPE_INVALID):
1986 /* see 8.1.1 -- check all queues */
1988 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1990 he_service_rbrq(he_dev, 0);
1991 he_service_rbpl(he_dev, 0);
1992 he_service_tbrq(he_dev, 0);
1995 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1998 he_dev->irq_head->isw = ITYPE_INVALID;
2000 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2004 if (updated > he_dev->irq_peak)
2005 he_dev->irq_peak = updated;
2008 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2009 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2010 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2011 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2013 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2017 he_irq_handler(int irq, void *dev_id)
2019 unsigned long flags;
2020 struct he_dev *he_dev = (struct he_dev * )dev_id;
2026 spin_lock_irqsave(&he_dev->global_lock, flags);
2028 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2029 (*he_dev->irq_tailoffset << 2));
2031 if (he_dev->irq_tail == he_dev->irq_head) {
2032 HPRINTK("tailoffset not updated?\n");
2033 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2034 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2035 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2039 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2040 hprintk("spurious (or shared) interrupt?\n");
2043 if (he_dev->irq_head != he_dev->irq_tail) {
2045 tasklet_schedule(&he_dev->tasklet);
2046 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2047 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2049 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2050 return IRQ_RETVAL(handled);
2054 static __inline__ void
2055 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2057 struct he_tpdrq *new_tail;
2059 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2060 tpd, cid, he_dev->tpdrq_tail);
2062 /* new_tail = he_dev->tpdrq_tail; */
2063 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2064 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2067 * check to see if we are about to set the tail == head
2068 * if true, update the head pointer from the adapter
2069 * to see if this is really the case (reading the queue
2070 * head for every enqueue would be unnecessarily slow)
2073 if (new_tail == he_dev->tpdrq_head) {
2074 he_dev->tpdrq_head = (struct he_tpdrq *)
2075 (((unsigned long)he_dev->tpdrq_base) |
2076 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2078 if (new_tail == he_dev->tpdrq_head) {
2081 hprintk("tpdrq full (cid 0x%x)\n", cid);
2084 * push tpd onto a transmit backlog queue
2085 * after service_tbrq, service the backlog
2086 * for now, we just drop the pdu
2088 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2089 if (tpd->iovec[slot].addr)
2090 dma_unmap_single(&he_dev->pci_dev->dev,
2091 tpd->iovec[slot].addr,
2092 tpd->iovec[slot].len & TPD_LEN_MASK,
2097 tpd->vcc->pop(tpd->vcc, tpd->skb);
2099 dev_kfree_skb_any(tpd->skb);
2100 atomic_inc(&tpd->vcc->stats->tx_err);
2102 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2107 /* 2.1.5 transmit packet descriptor ready queue */
2108 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2109 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2110 he_dev->tpdrq_tail->cid = cid;
2113 he_dev->tpdrq_tail = new_tail;
2115 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2116 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2120 he_open(struct atm_vcc *vcc)
2122 unsigned long flags;
2123 struct he_dev *he_dev = HE_DEV(vcc->dev);
2124 struct he_vcc *he_vcc;
2126 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2127 short vpi = vcc->vpi;
2130 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2133 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2135 set_bit(ATM_VF_ADDR, &vcc->flags);
2137 cid = he_mkcid(he_dev, vpi, vci);
2139 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2140 if (he_vcc == NULL) {
2141 hprintk("unable to allocate he_vcc during open\n");
2145 INIT_LIST_HEAD(&he_vcc->buffers);
2146 he_vcc->pdu_len = 0;
2147 he_vcc->rc_index = -1;
2149 init_waitqueue_head(&he_vcc->rx_waitq);
2150 init_waitqueue_head(&he_vcc->tx_waitq);
2152 vcc->dev_data = he_vcc;
2154 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2157 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2159 pcr_goal = he_dev->atm_dev->link_rate;
2160 if (pcr_goal < 0) /* means round down, technically */
2161 pcr_goal = -pcr_goal;
2163 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2165 switch (vcc->qos.aal) {
2167 tsr0_aal = TSR0_AAL5;
2171 tsr0_aal = TSR0_AAL0_SDU;
2172 tsr4 = TSR4_AAL0_SDU;
2179 spin_lock_irqsave(&he_dev->global_lock, flags);
2180 tsr0 = he_readl_tsr0(he_dev, cid);
2181 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2183 if (TSR0_CONN_STATE(tsr0) != 0) {
2184 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2189 switch (vcc->qos.txtp.traffic_class) {
2191 /* 2.3.3.1 open connection ubr */
2193 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2194 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2198 /* 2.3.3.2 open connection cbr */
2200 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2201 if ((he_dev->total_bw + pcr_goal)
2202 > (he_dev->atm_dev->link_rate * 9 / 10))
2208 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2210 /* find an unused cs_stper register */
2211 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2212 if (he_dev->cs_stper[reg].inuse == 0 ||
2213 he_dev->cs_stper[reg].pcr == pcr_goal)
2216 if (reg == HE_NUM_CS_STPER) {
2218 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2222 he_dev->total_bw += pcr_goal;
2224 he_vcc->rc_index = reg;
2225 ++he_dev->cs_stper[reg].inuse;
2226 he_dev->cs_stper[reg].pcr = pcr_goal;
2228 clock = he_is622(he_dev) ? 66667000 : 50000000;
2229 period = clock / pcr_goal;
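/* illustrative example: a 100000 cps cbr connection on an OC-3 card
 * (50 MHz clock) gives period = 500; rate_to_atmf(period / 2) below
 * encodes the 250-cycle half period for the scheduler */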
2231 HPRINTK("rc_index = %d period = %d\n",
2234 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2236 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2238 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2247 spin_lock_irqsave(&he_dev->global_lock, flags);
2249 he_writel_tsr0(he_dev, tsr0, cid);
2250 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2251 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2252 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2253 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2254 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2256 he_writel_tsr3(he_dev, 0x0, cid);
2257 he_writel_tsr5(he_dev, 0x0, cid);
2258 he_writel_tsr6(he_dev, 0x0, cid);
2259 he_writel_tsr7(he_dev, 0x0, cid);
2260 he_writel_tsr8(he_dev, 0x0, cid);
2261 he_writel_tsr10(he_dev, 0x0, cid);
2262 he_writel_tsr11(he_dev, 0x0, cid);
2263 he_writel_tsr12(he_dev, 0x0, cid);
2264 he_writel_tsr13(he_dev, 0x0, cid);
2265 he_writel_tsr14(he_dev, 0x0, cid);
2266 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2267 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2270 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2273 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2274 &HE_VCC(vcc)->rx_waitq);
2276 switch (vcc->qos.aal) {
2288 spin_lock_irqsave(&he_dev->global_lock, flags);
2290 rsr0 = he_readl_rsr0(he_dev, cid);
2291 if (rsr0 & RSR0_OPEN_CONN) {
2292 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2294 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2299 rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2300 rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2301 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2302 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2304 #ifdef USE_CHECKSUM_HW
2305 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2306 rsr0 |= RSR0_TCP_CKSUM;
2309 he_writel_rsr4(he_dev, rsr4, cid);
2310 he_writel_rsr1(he_dev, rsr1, cid);
2311 /* 5.1.11 last parameter initialized should be
2312 the open/closed indication in rsr0 */
2313 he_writel_rsr0(he_dev,
2314 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2315 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2317 spin_unlock_irqrestore(&he_dev->global_lock, flags);
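/*
 * Receive open summary: all traffic uses group 0 and the large buffer
 * pool only, early/partial packet discard is enabled just for UBR, and
 * (per the 5.1.11 note above) RSR0 is written last because it holds the
 * open/closed indication that marks the connection open.
 */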
2324 clear_bit(ATM_VF_ADDR, &vcc->flags);
2327 set_bit(ATM_VF_READY, &vcc->flags);
2333 he_close(struct atm_vcc *vcc)
2335 unsigned long flags;
2336 DECLARE_WAITQUEUE(wait, current);
2337 struct he_dev *he_dev = HE_DEV(vcc->dev);
2340 struct he_vcc *he_vcc = HE_VCC(vcc);
2341 #define MAX_RETRY 30
2342 int retry = 0, sleep = 1, tx_inuse;
2344 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2346 clear_bit(ATM_VF_READY, &vcc->flags);
2347 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2349 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2352 HPRINTK("close rx cid 0x%x\n", cid);
2354 /* 2.7.2.2 close receive operation */
2356 /* wait for previous close (if any) to finish */
2358 spin_lock_irqsave(&he_dev->global_lock, flags);
2359 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2360 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2364 set_current_state(TASK_UNINTERRUPTIBLE);
2365 add_wait_queue(&he_vcc->rx_waitq, &wait);
2367 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2368 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2369 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2370 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2372 timeout = schedule_timeout(30*HZ);
2374 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2375 set_current_state(TASK_RUNNING);
2378 hprintk("close rx timeout cid 0x%x\n", cid);
2380 HPRINTK("close rx cid 0x%x complete\n", cid);
2384 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2385 volatile unsigned tsr4, tsr0;
2388 HPRINTK("close tx cid 0x%x\n", cid);
2392 * ... the host must first stop queueing packets to the TPDRQ
2393 * on the connection to be closed, then wait for all outstanding
2394 * packets to be transmitted and their buffers returned to the
2395 * TBRQ. When the last packet on the connection arrives in the
2396 * TBRQ, the host issues the close command to the adapter.
2399 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2400 (retry < MAX_RETRY)) {
2409 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2411 /* 2.3.1.1 generic close operations with flush */
2413 spin_lock_irqsave(&he_dev->global_lock, flags);
2414 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2415 /* also clears TSR4_SESSION_ENDED */
2417 switch (vcc->qos.txtp.traffic_class) {
2419 he_writel_tsr1(he_dev,
2420 TSR1_MCR(rate_to_atmf(200000))
2421 | TSR1_PCR(0), cid);
2424 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2427 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2429 tpd = __alloc_tpd(he_dev);
2431 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2432 goto close_tx_incomplete;
2434 tpd->status |= TPD_EOS | TPD_INT;
2439 set_current_state(TASK_UNINTERRUPTIBLE);
2440 add_wait_queue(&he_vcc->tx_waitq, &wait);
2441 __enqueue_tpd(he_dev, tpd, cid);
2442 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2444 timeout = schedule_timeout(30*HZ);
2446 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2447 set_current_state(TASK_RUNNING);
2449 spin_lock_irqsave(&he_dev->global_lock, flags);
2452 hprintk("close tx timeout cid 0x%x\n", cid);
2453 goto close_tx_incomplete;
2456 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2457 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2461 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2462 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2466 close_tx_incomplete:
2468 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2469 int reg = he_vcc->rc_index;
2471 HPRINTK("cs_stper reg = %d\n", reg);
2473 if (he_dev->cs_stper[reg].inuse == 0)
2474 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2476 --he_dev->cs_stper[reg].inuse;
2478 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2480 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2482 HPRINTK("close tx cid 0x%x complete\n", cid);
2487 clear_bit(ATM_VF_ADDR, &vcc->flags);
2491 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2493 unsigned long flags;
2494 struct he_dev *he_dev = HE_DEV(vcc->dev);
2495 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2497 #ifdef USE_SCATTERGATHER
2501 #define HE_TPD_BUFSIZE 0xffff
2503 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2505 if ((skb->len > HE_TPD_BUFSIZE) ||
2506 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2507 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2511 dev_kfree_skb_any(skb);
2512 atomic_inc(&vcc->stats->tx_err);
2516 #ifndef USE_SCATTERGATHER
2517 if (skb_shinfo(skb)->nr_frags) {
2518 hprintk("no scatter/gather support\n");
2522 dev_kfree_skb_any(skb);
2523 atomic_inc(&vcc->stats->tx_err);
2527 spin_lock_irqsave(&he_dev->global_lock, flags);
2529 tpd = __alloc_tpd(he_dev);
2534 dev_kfree_skb_any(skb);
2535 atomic_inc(&vcc->stats->tx_err);
2536 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2540 if (vcc->qos.aal == ATM_AAL5)
2541 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2543 char *pti_clp = (void *) (skb->data + 3);
2546 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2547 clp = (*pti_clp & ATM_HDR_CLP);
2548 tpd->status |= TPD_CELLTYPE(pti);
2550 tpd->status |= TPD_CLP;
2552 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
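/*
 * AAL0 path: the socket supplies a 52 byte cell (the 4 header bytes
 * without the HEC, plus the 48 byte payload).  PTI and CLP come from
 * the fourth header byte and are folded into the TPD status, and
 * skb_pull() removes ATM_AAL0_SDU - ATM_CELL_PAYLOAD = 4 header bytes
 * so only the cell payload is handed to the adapter.
 */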
2555 #ifdef USE_SCATTERGATHER
2556 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
2557 skb_headlen(skb), DMA_TO_DEVICE);
2558 tpd->iovec[slot].len = skb_headlen(skb);
2561 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2562 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2564 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2566 tpd->skb = NULL; /* not the last fragment,
2567 so don't ->pop() the skb yet */
2570 __enqueue_tpd(he_dev, tpd, cid);
2571 tpd = __alloc_tpd(he_dev);
2576 dev_kfree_skb_any(skb);
2577 atomic_inc(&vcc->stats->tx_err);
2578 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2581 tpd->status |= TPD_USERCELL;
2585 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
2586 (void *) page_address(frag->page) + frag->page_offset,
2587 frag->size, DMA_TO_DEVICE);
2588 tpd->iovec[slot].len = frag->size;
2593 tpd->iovec[slot - 1].len |= TPD_LST;
2595 tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2596 tpd->length0 = skb->len | TPD_LST;
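/*
 * Without scatter/gather the linear skb is mapped as one DMA buffer;
 * TPD_LST flags it as the last (and only) buffer of the packet, and
 * TPD_INT below requests a transmit-complete interrupt for it.
 */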
2598 tpd->status |= TPD_INT;
2603 ATM_SKB(skb)->vcc = vcc;
2605 __enqueue_tpd(he_dev, tpd, cid);
2606 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2608 atomic_inc(&vcc->stats->tx);
2614 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2616 unsigned long flags;
2617 struct he_dev *he_dev = HE_DEV(atm_dev);
2618 struct he_ioctl_reg reg;
2623 if (!capable(CAP_NET_ADMIN))
2626 if (copy_from_user(&reg, arg,
2627 sizeof(struct he_ioctl_reg)))
2630 spin_lock_irqsave(&he_dev->global_lock, flags);
2632 case HE_REGTYPE_PCI:
2633 if (reg.addr >= HE_REGMAP_SIZE) {
2638 reg.val = he_readl(he_dev, reg.addr);
2640 case HE_REGTYPE_RCM:
2642 he_readl_rcm(he_dev, reg.addr);
2644 case HE_REGTYPE_TCM:
2646 he_readl_tcm(he_dev, reg.addr);
2648 case HE_REGTYPE_MBOX:
2650 he_readl_mbox(he_dev, reg.addr);
2656 spin_unlock_irqrestore(&he_dev->global_lock, flags);
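/*
 * The ioctl path shown here reads a register on behalf of user space:
 * it is root only (CAP_NET_ADMIN), the he_ioctl_reg request is copied
 * in, the addressed PCI, RCM, TCM or mailbox register is read while
 * holding the global lock, and the result is copied back out.
 */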
2658 if (copy_to_user(arg, &reg,
2659 sizeof(struct he_ioctl_reg)))
2663 #ifdef CONFIG_ATM_HE_USE_SUNI
2664 if (atm_dev->phy && atm_dev->phy->ioctl)
2665 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2666 #else /* CONFIG_ATM_HE_USE_SUNI */
2668 #endif /* CONFIG_ATM_HE_USE_SUNI */
2676 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2678 unsigned long flags;
2679 struct he_dev *he_dev = HE_DEV(atm_dev);
2681 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2683 spin_lock_irqsave(&he_dev->global_lock, flags);
2684 he_writel(he_dev, val, FRAMER + (addr*4));
2685 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2686 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2690 static unsigned char
2691 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2693 unsigned long flags;
2694 struct he_dev *he_dev = HE_DEV(atm_dev);
2697 spin_lock_irqsave(&he_dev->global_lock, flags);
2698 reg = he_readl(he_dev, FRAMER + (addr*4));
2699 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2701 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2706 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2708 unsigned long flags;
2709 struct he_dev *he_dev = HE_DEV(dev);
2712 struct he_rbrq *rbrq_tail;
2713 struct he_tpdrq *tpdrq_head;
2714 int rbpl_head, rbpl_tail;
2716 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2721 return sprintf(page, "ATM he driver\n");
2724 return sprintf(page, "%s%s\n\n",
2725 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2728 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2730 spin_lock_irqsave(&he_dev->global_lock, flags);
2731 mcc += he_readl(he_dev, MCC);
2732 oec += he_readl(he_dev, OEC);
2733 dcc += he_readl(he_dev, DCC);
2734 cec += he_readl(he_dev, CEC);
2735 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2738 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2739 mcc, oec, dcc, cec);
2742 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2743 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2746 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2750 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2751 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2754 return sprintf(page, "tbrq_size = %d peak = %d\n",
2755 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2759 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2760 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2762 inuse = rbpl_head - rbpl_tail;
2763 if (inuse < 0)
2764 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2765 inuse /= sizeof(struct he_rbp);
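/*
 * G0_RBPL_S and G0_RBPL_T are treated as byte offsets into the large
 * buffer pool ring, so the head/tail difference (corrected for wrap
 * around by the ring size) divided by the entry size gives the number
 * of buffers currently handed to the adapter.
 */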
2768 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2769 CONFIG_RBPL_SIZE, inuse);
2773 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2775 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2777 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2778 he_dev->cs_stper[i].pcr,
2779 he_dev->cs_stper[i].inuse);
2782 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2783 he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2788 /* eeprom routines -- see 4.7 */
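/*
 * read_prom_byte() bit-bangs the serial EEPROM through HOST_CNTL:
 * write enable is turned on, the READ opcode is shifted out from
 * readtab[], the 8 address bits go out MSB first with two HOST_CNTL
 * writes per bit (clocktab[] supplying the clock edges), and the 8
 * data bits are clocked back in one at a time by sampling ID_DOUT
 * between the clock writes.  ID_CS is written at the end of the cycle.
 */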
2790 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2792 u32 val = 0, tmp_read = 0;
2796 val = readl(he_dev->membase + HOST_CNTL);
2799 /* Turn on write enable */
2801 he_writel(he_dev, val, HOST_CNTL);
2803 /* Send READ instruction */
2804 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2805 he_writel(he_dev, val | readtab[i], HOST_CNTL);
2806 udelay(EEPROM_DELAY);
2809 /* Next, we need to send the byte address to read from */
2810 for (i = 7; i >= 0; i--) {
2811 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2812 udelay(EEPROM_DELAY);
2813 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2814 udelay(EEPROM_DELAY);
2819 val &= 0xFFFFF7FF; /* Turn off write enable */
2820 he_writel(he_dev, val, HOST_CNTL);
2822 /* Now, we can read data from the EEPROM by clocking it in */
2823 for (i = 7; i >= 0; i--) {
2824 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2825 udelay(EEPROM_DELAY);
2826 tmp_read = he_readl(he_dev, HOST_CNTL);
2827 byte_read |= (unsigned char)
2828 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2829 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2830 udelay(EEPROM_DELAY);
2833 he_writel(he_dev, val | ID_CS, HOST_CNTL);
2834 udelay(EEPROM_DELAY);
2839 MODULE_LICENSE("GPL");
2840 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2841 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2842 module_param(disable64, bool, 0);
2843 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2844 module_param(nvpibits, short, 0);
2845 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2846 module_param(nvcibits, short, 0);
2847 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2848 module_param(rx_skb_reserve, short, 0);
2849 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2850 module_param(irq_coalesce, bool, 0);
2851 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2852 module_param(sdh, bool, 0);
2853 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2855 static struct pci_device_id he_pci_tbl[] = {
2856 { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2860 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2862 static struct pci_driver he_driver = {
2864 .probe = he_init_one,
2865 .remove = he_remove_one,
2866 .id_table = he_pci_tbl,
2869 module_pci_driver(he_driver);