1 /******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
7 *******************************************************************************
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
21 supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The followings are the change log and history:
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
30 around for the hardware anomalies.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
41 *******************************************************************************/
43 #include <linux/module.h>
44 #include <linux/kernel.h>
46 #include <linux/pci.h>
47 #include <linux/errno.h>
48 #include <linux/atm.h>
49 #include <linux/atmdev.h>
50 #include <linux/sonet.h>
51 #include <linux/skbuff.h>
52 #include <linux/time.h>
53 #include <linux/delay.h>
54 #include <linux/uio.h>
55 #include <linux/init.h>
56 #include <linux/wait.h>
57 #include <linux/slab.h>
58 #include <asm/system.h>
60 #include <asm/atomic.h>
61 #include <asm/uaccess.h>
62 #include <asm/string.h>
63 #include <asm/byteorder.h>
64 #include <linux/vmalloc.h>
65 #include <linux/jiffies.h>
/*
 * Swap the two bytes of a 16-bit value, e.g. 0x1234 -> 0x3412.
 * Every use of the argument is parenthesized; the previous form
 * ((x & 0xff) << 8) mis-expanded for operands built from operators with
 * lower precedence than '&' (ternary, '^', '|', shifts).
 */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
/*
 * Per-PHY (SUNI) private state hung off an atm_dev.  The argument is
 * parenthesized so expressions such as "&devs[i]" or casts expand
 * correctly; the old form applied '->' to an unparenthesized operand.
 */
#define PRIV(dev) ((struct suni_priv *) (dev)->phy_data)
/*
 * Module-scope state: forward declarations, per-board device tables
 * (up to 8 adapters, indexed by probe order), the LED timer, and the
 * TX/RX buffer tunables exported as module parameters.
 */
72 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
73 static void desc_dbg(IADEV *iadev);
75 static IADEV *ia_dev[8];
76 static struct atm_dev *_ia_dev[8];
77 static int iadev_count;
78 static void ia_led_timer(unsigned long arg);
79 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
80 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
81 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
/* Debug mask; the commented-out flags show the available IF_IADBG_* bits. */
82 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
83 |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
85 module_param(IA_TX_BUF, int, 0);
86 module_param(IA_TX_BUF_SZ, int, 0);
87 module_param(IA_RX_BUF, int, 0);
88 module_param(IA_RX_BUF_SZ, int, 0);
/* Mode 0644: debug mask is adjustable at runtime via sysfs. */
89 module_param(IADebugFlag, uint, 0644);
91 MODULE_LICENSE("GPL");
93 /**************************** IA_LIB **********************************/
/*
 * IARTN_Q helpers: a singly linked FIFO ("return queue") used to hand
 * completed TX descriptors back for later processing (see ia_tx_poll).
 * NOTE(review): this listing appears truncated -- several body lines
 * (initializers, else-branches, returns, closing braces) are not visible.
 */
95 static void ia_init_rtn_q (IARTN_Q *que)
/* Push 'data' at the head of the queue. */
101 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
104 if (que->next == NULL)
105 que->next = que->tail = data;
107 data->next = que->next;
/* Append a copy of 'data' at the tail; returns -1 on allocation failure
 * (GFP_ATOMIC: callable from IRQ context). */
113 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
114 IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
115 if (!entry) return -1;
118 if (que->next == NULL)
119 que->next = que->tail = entry;
121 que->tail->next = entry;
122 que->tail = que->tail->next;
/* Pop the head entry; caller owns (and must free) the returned node. */
127 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
129 if (que->next == NULL)
132 if ( que->next == que->tail)
133 que->next = que->tail = NULL;
135 que->next = que->next->next;
/*
 * Drain the Transmit Complete Queue (TCQ): walk from the host's cached
 * write pointer to the hardware write pointer, retiring each completed
 * descriptor and queueing rate-limited VCs' skbs on tx_return_q.
 * NOTE(review): listing appears truncated (an 'if' arm before the first
 * 'else if' and loop-closing braces are not visible).
 */
139 static void ia_hack_tcq(IADEV *dev) {
143 struct ia_vcc *iavcc_r = NULL;
145 tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
146 while (dev->host_tcq_wr != tcq_wr) {
147 desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
/* Descriptor already recycled: clear the TCQ slot. */
149 else if (!dev->desc_tbl[desc1 -1].timestamp) {
150 IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
151 *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
/* Live descriptor: retire it and credit the owning VC. */
153 else if (dev->desc_tbl[desc1 -1].timestamp) {
154 if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
155 printk("IA: Fatal err in get_desc\n");
158 iavcc_r->vc_desc_cnt--;
159 dev->desc_tbl[desc1 -1].timestamp = 0;
160 IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
161 dev->desc_tbl[desc1 -1].txskb, desc1);)
/* Slow (rate-limited) VCs: defer skb free to ia_tx_poll via return_q. */
162 if (iavcc_r->pcr < dev->rate_limit) {
163 IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
164 if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
165 printk("ia_hack_tcq: No memory available\n");
167 dev->desc_tbl[desc1 -1].iavcc = NULL;
168 dev->desc_tbl[desc1 -1].txskb = NULL;
/* TCQ entries are 2 bytes; wrap at the queue end. */
170 dev->host_tcq_wr += 2;
171 if (dev->host_tcq_wr > dev->ffL.tcq_ed)
172 dev->host_tcq_wr = dev->ffL.tcq_st;
/*
 * Allocate the next free TX descriptor number from the TCQ for 'iavcc'.
 * Periodically (every ~50 jiffies, or when the TCQ looks empty) runs a
 * recovery scan that forcibly frees descriptors whose owner's timeout
 * (ltimeout) has expired, pushing their numbers back into the TCQ.
 * Returns the 1-based descriptor number; stamps its allocation time.
 * NOTE(review): listing appears truncated (locals, returns and several
 * closing braces are not visible).
 */
176 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
179 struct ia_vcc *iavcc_r = NULL;
/* Shared across calls: last time the recovery scan ran. */
181 static unsigned long timer = 0;
185 if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
188 while (i < dev->num_tx_desc) {
189 if (!dev->desc_tbl[i].timestamp) {
193 ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
194 delta = jiffies - dev->desc_tbl[i].timestamp;
/* Descriptor stuck past its owner's timeout: reclaim it. */
195 if (delta >= ltimeout) {
196 IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
197 if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
198 dev->ffL.tcq_rd = dev->ffL.tcq_ed;
200 dev->ffL.tcq_rd -= 2;
/* Put the freed descriptor number back into the TCQ slot. */
201 *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
202 if (!(skb = dev->desc_tbl[i].txskb) ||
203 !(iavcc_r = dev->desc_tbl[i].iavcc))
204 printk("Fatal err, desc table vcc or skb is NULL\n");
206 iavcc_r->vc_desc_cnt--;
207 dev->desc_tbl[i].timestamp = 0;
208 dev->desc_tbl[i].iavcc = NULL;
209 dev->desc_tbl[i].txskb = NULL;
214 if (dev->ffL.tcq_rd == dev->host_tcq_wr)
217 /* Get the next available descriptor number from TCQ */
218 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
/* Skip zero/still-busy slots, wrapping; bail if the queue drains. */
220 while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
221 dev->ffL.tcq_rd += 2;
222 if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
223 dev->ffL.tcq_rd = dev->ffL.tcq_st;
224 if (dev->ffL.tcq_rd == dev->host_tcq_wr)
226 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
229 /* get system time */
230 dev->desc_tbl[desc_num -1].timestamp = jiffies;
/*
 * ABR hardware lockup workaround.  Detects an ABR VC whose scheduler
 * state has stopped advancing (same cell slot and fraction across
 * several samples) and, if confirmed, forces the VC back to the idle
 * state and re-inserts it into the ABR schedule table.
 * NOTE(review): listing appears truncated (locals, delay calls and
 * some closing braces are not visible).
 */
234 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
236 vcstatus_t *vcstatus;
238 u_short tempCellSlot, tempFract;
239 struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
240 struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
243 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
244 vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
/* Only probe every 5th invocation (sample counter). */
247 if( vcstatus->cnt == 0x05 ) {
250 if( eabr_vc->last_desc ) {
251 if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
252 /* Wait for 10 Micro sec */
254 if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
/* Lockup heuristic: scheduler position unchanged since last sample. */
258 tempCellSlot = abr_vc->last_cell_slot;
259 tempFract = abr_vc->fraction;
260 if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
261 && (tempFract == dev->testTable[vcc->vci]->fract))
263 dev->testTable[vcc->vci]->lastTime = tempCellSlot;
264 dev->testTable[vcc->vci]->fract = tempFract;
266 } /* last descriptor */
268 } /* vcstatus->cnt */
/* Recovery: halt segmentation, idle the VC, reschedule it, resume. */
271 IF_ABR(printk("LOCK UP found\n");)
272 writew(0xFFFD, dev->seg_reg+MODE_REG_0);
273 /* Wait for 10 Micro sec */
275 abr_vc->status &= 0xFFF8;
276 abr_vc->status |= 0x0001; /* state is idle */
277 shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
/* Find the first empty schedule-table slot for this VCI. */
278 for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
280 shd_tbl[i] = vcc->vci;
282 IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
283 writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
284 writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
285 writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
295 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
297 ** +----+----+------------------+-------------------------------+
298 ** | R  | NZ |  5-bit exponent  |        9-bit mantissa         |
299 ** +----+----+------------------+-------------------------------+
301 ** R = reserved (written as 0)
302 ** NZ = 0 if 0 cells/sec; 1 otherwise
304 ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
/* NOTE(review): return type, locals and the surrounding branches are
 * not visible in this listing; 'i' is presumably the bit position of
 * the most significant set bit (the exponent) -- confirm upstream. */
307 cellrate_to_float(u32 cr)
311 #define M_BITS 9 /* Number of bits in mantissa */
312 #define E_BITS 5 /* Number of bits in exponent */
/* Only the low 24 bits of the cell rate are significant. */
316 u32 tmp = cr & 0x00ffffff;
325 flot = NZ | (i << M_BITS) | (cr & M_MASK);
327 flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
329 flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
335 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
/* Inverse of cellrate_to_float: decode exponent/mantissa and shift the
 * implicit-1 mantissa into place.  Returns 0 when the NZ bit is clear. */
338 float_to_cellrate(u16 rate)
340 u32 exp, mantissa, cps;
341 if ((rate & NZ) == 0)
343 exp = (rate >> M_BITS) & E_MASK;
344 mantissa = rate & M_MASK;
/* Restore the implicit leading 1 of the mantissa. */
347 cps = (1 << M_BITS) | mantissa;
350 else if (exp > M_BITS)
351 cps <<= (exp - M_BITS);
353 cps >>= (M_BITS - exp);
/*
 * Fill an srv_cls_param_t with default ABR service parameters:
 * PCR pinned to the board's line rate, fixed ICR and maximum TBE.
 * NOTE(review): remaining field defaults are not visible in this listing.
 */
358 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
359 srv_p->class_type = ATM_ABR;
360 srv_p->pcr = dev->LineRate;
362 srv_p->icr = 0x055cb7;
363 srv_p->tbe = 0xffffff;
/*
 * Program an ABR VC into the hardware VC tables.
 * flag == 1: segmentation-side (FFRED) entry -- validate/clamp the
 * service parameters, then encode nrm/trm/crm/pcr/icr/adtf/cdf/mcr.
 * flag == 0: reassembly-side (RFRED) entry -- mark the reassembly
 * table slot and program the additive-increase rate (air).
 * NOTE(review): return statements and some validation arms are not
 * visible in this listing.
 */
374 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
375 struct atm_vcc *vcc, u8 flag)
377 f_vc_abr_entry *f_abr_vc;
378 r_vc_abr_entry *r_abr_vc;
381 u16 adtf, air, *ptr16;
382 f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
383 f_abr_vc += vcc->vci;
385 case 1: /* FFRED initialization */
386 #if 0 /* sanity check */
/* Parameter validation/clamping against line rate and ABR limits. */
389 if (srv_p->pcr > dev->LineRate)
390 srv_p->pcr = dev->LineRate;
391 if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
392 return MCR_UNAVAILABLE;
393 if (srv_p->mcr > srv_p->pcr)
396 srv_p->icr = srv_p->pcr;
397 if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
399 if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
401 if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
403 if (srv_p->nrm > MAX_NRM)
405 if (srv_p->trm > MAX_TRM)
407 if (srv_p->adtf > MAX_ADTF)
409 else if (srv_p->adtf == 0)
411 if (srv_p->cdf > MAX_CDF)
413 if (srv_p->rif > MAX_RIF)
415 if (srv_p->rdf > MAX_RDF)
418 memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
419 f_abr_vc->f_vc_type = ABR;
420 nrm = 2 << srv_p->nrm; /* (2 ** (srv_p->nrm +1)) */
421 /* i.e 2**n = 2 << (n-1) */
422 f_abr_vc->f_nrm = nrm << 8 | nrm;
423 trm = 100000/(2 << (16 - srv_p->trm));
424 if ( trm == 0) trm = 1;
425 f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
426 crm = srv_p->tbe / nrm;
427 if (crm == 0) crm = 1;
428 f_abr_vc->f_crm = crm & 0xff;
429 f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
/* ICR is bounded by tbe/frtt (cells per round-trip). */
430 icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
431 ((srv_p->tbe/srv_p->frtt)*1000000) :
432 (1000000/(srv_p->frtt/srv_p->tbe)));
433 f_abr_vc->f_icr = cellrate_to_float(icr);
434 adtf = (10000 * srv_p->adtf)/8192;
435 if (adtf == 0) adtf = 1;
436 f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
437 f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
438 f_abr_vc->f_acr = f_abr_vc->f_icr;
439 f_abr_vc->f_status = 0x0042;
441 case 0: /* RFRED initialization */
442 ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
443 *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
444 r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
445 r_abr_vc += vcc->vci;
446 r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
447 air = srv_p->pcr << (15 - srv_p->rif);
448 if (air == 0) air = 1;
449 r_abr_vc->r_air = cellrate_to_float(air);
450 dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
/* Reserve this VC's minimum cell rate out of the shared budget. */
451 dev->sum_mcr += srv_p->mcr;
/*
 * Reserve CBR bandwidth for a VC: convert its PCR into a number of
 * schedule-table entries (at the board's granularity) and spread those
 * entries as evenly as possible through the CBR schedule table,
 * searching outward from each ideal slot for a free location.
 * Enables the CBR scheduler on the first active CBR VC.
 * NOTE(review): returns, loop braces and some error paths are not
 * visible in this listing.
 */
459 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
460 u32 rateLow=0, rateHigh, rate;
462 struct ia_vcc *ia_vcc;
464 int idealSlot =0, testSlot, toBeAssigned, inc;
466 u16 *SchedTbl, *TstSchedTbl;
472 /* IpAdjustTrafficParams */
473 if (vcc->qos.txtp.max_pcr <= 0) {
474 IF_ERR(printk("PCR for CBR not defined\n");)
477 rate = vcc->qos.txtp.max_pcr;
478 entries = rate / dev->Granularity;
479 IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
480 entries, rate, dev->Granularity);)
482 IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
/* Round to the nearer multiple of Granularity (biased 3:1 upward). */
483 rateLow = entries * dev->Granularity;
484 rateHigh = (entries + 1) * dev->Granularity;
485 if (3*(rate - rateLow) > (rateHigh - rate))
487 if (entries > dev->CbrRemEntries) {
488 IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
489 IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
490 entries, dev->CbrRemEntries);)
494 ia_vcc = INPH_IA_VCC(vcc);
495 ia_vcc->NumCbrEntry = entries;
496 dev->sum_mcr += entries * dev->Granularity;
497 /* IaFFrednInsertCbrSched */
498 // Starting at an arbitrary location, place the entries into the table
499 // as smoothly as possible
501 spacing = dev->CbrTotEntries / entries;
502 sp_mod = dev->CbrTotEntries % entries; // get modulo
503 toBeAssigned = entries;
506 IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
509 // If this is the first time, start the table loading for this connection
510 // as close to entryPoint as possible.
511 if (toBeAssigned == entries)
513 idealSlot = dev->CbrEntryPt;
514 dev->CbrEntryPt += 2; // Adding 2 helps to prevent clumping
515 if (dev->CbrEntryPt >= dev->CbrTotEntries)
516 dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
518 idealSlot += (u32)(spacing + fracSlot); // Point to the next location
519 // in the table that would be smoothest
520 fracSlot = ((sp_mod + sp_mod2) / entries); // get new integer part
521 sp_mod2 = ((sp_mod + sp_mod2) % entries); // calc new fractional part
523 if (idealSlot >= (int)dev->CbrTotEntries)
524 idealSlot -= dev->CbrTotEntries;
525 // Continuously check around this ideal value until a null
526 // location is encountered.
527 SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
529 testSlot = idealSlot;
530 TstSchedTbl = (u16*)(SchedTbl+testSlot); //set index and read in value
531 IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
532 testSlot, TstSchedTbl,toBeAssigned);)
533 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
534 while (cbrVC) // If another VC at this location, we have to keep looking
/* Probe alternately below and above the ideal slot, widening by 'inc'. */
537 testSlot = idealSlot - inc;
538 if (testSlot < 0) { // Wrap if necessary
539 testSlot += dev->CbrTotEntries;
540 IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
543 TstSchedTbl = (u16 *)(SchedTbl + testSlot); // set table index
544 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
547 testSlot = idealSlot + inc;
548 if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
549 testSlot -= dev->CbrTotEntries;
550 IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
551 IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
552 testSlot, toBeAssigned);)
554 // set table index and read in value
555 TstSchedTbl = (u16*)(SchedTbl + testSlot);
556 IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
557 TstSchedTbl,cbrVC,inc);)
558 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
560 // Move this VCI number into this location of the CBR Sched table.
561 memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
562 dev->CbrRemEntries--;
566 /* IaFFrednCbrEnable */
567 dev->NumEnabledCBR++;
/* First CBR VC: turn on the CBR scheduler alongside UBR/ABR. */
568 if (dev->NumEnabledCBR == 1) {
569 writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
570 IF_CBR(printk("CBR is enabled\n");)
/*
 * Release a CBR VC: scrub its VCI out of the CBR schedule table
 * (returning the entries to the free pool) and disable the CBR
 * scheduler when the last CBR VC goes away.
 * NOTE(review): the slot-clearing statement inside the scan loop is
 * not visible in this listing.
 */
574 static void ia_cbrVc_close (struct atm_vcc *vcc) {
576 u16 *SchedTbl, NullVci = 0;
579 iadev = INPH_IA_DEV(vcc->dev);
580 iadev->NumEnabledCBR--;
581 SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
582 if (iadev->NumEnabledCBR == 0) {
583 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
584 IF_CBR (printk("CBR support disabled\n");)
587 for (i=0; i < iadev->CbrTotEntries; i++)
589 if (*SchedTbl == vcc->vci) {
590 iadev->CbrRemEntries++;
596 IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
/*
 * Count free TX descriptors: distance between the host TCQ write
 * pointer and the read pointer, in 2-byte entries (wrapped case on
 * the else branch).
 */
599 static int ia_avail_descs(IADEV *iadev) {
602 if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
603 tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
605 tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
606 iadev->ffL.tcq_st) / 2;
610 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
/*
 * Drain the TX backlog: while descriptors remain, pop queued skbs and
 * hand them to ia_pkt_tx; drop skbs whose VCC vanished or closed, and
 * requeue (at the head) any skb the transmit path could not take.
 * NOTE(review): loop-closing braces/continues are not visible here.
 */
612 static int ia_que_tx (IADEV *iadev) {
616 struct ia_vcc *iavcc;
617 num_desc = ia_avail_descs(iadev);
619 while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
620 if (!(vcc = ATM_SKB(skb)->vcc)) {
621 dev_kfree_skb_any(skb);
622 printk("ia_que_tx: Null vcc\n");
625 if (!test_bit(ATM_VF_READY,&vcc->flags)) {
626 dev_kfree_skb_any(skb);
627 printk("Free the SKB on closed vci %d \n", vcc->vci);
630 iavcc = INPH_IA_VCC(vcc);
631 if (ia_pkt_tx (vcc, skb)) {
632 skb_queue_head(&iadev->tx_backlog, skb);
/*
 * Process the TX return queue: for each returned descriptor, find its
 * skb on the owning VC's txing_skb list, releasing (via vcc->pop or
 * dev_kfree_skb_any) every earlier skb already marked IA_TX_DONE.
 * If the skb is not found, the return-queue node is pushed back for a
 * later pass.
 * NOTE(review): several continue/brace lines are not visible here.
 */
639 static void ia_tx_poll (IADEV *iadev) {
640 struct atm_vcc *vcc = NULL;
641 struct sk_buff *skb = NULL, *skb1 = NULL;
642 struct ia_vcc *iavcc;
646 while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
647 skb = rtne->data.txskb;
649 printk("ia_tx_poll: skb is null\n");
652 vcc = ATM_SKB(skb)->vcc;
654 printk("ia_tx_poll: vcc is null\n");
655 dev_kfree_skb_any(skb);
659 iavcc = INPH_IA_VCC(vcc);
661 printk("ia_tx_poll: iavcc is null\n");
662 dev_kfree_skb_any(skb);
/* Walk the VC's in-flight list until we reach the returned skb. */
666 skb1 = skb_dequeue(&iavcc->txing_skb);
667 while (skb1 && (skb1 != skb)) {
668 if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
669 printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
671 IF_ERR(printk("Release the SKB not match\n");)
672 if ((vcc->pop) && (skb1->len != 0))
675 IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
679 dev_kfree_skb_any(skb1);
680 skb1 = skb_dequeue(&iavcc->txing_skb);
/* skb not on the list: requeue the return node and try again later. */
683 IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
684 ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
687 if ((vcc->pop) && (skb->len != 0))
690 IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
693 dev_kfree_skb_any(skb);
/*
 * Bit-bang a 16-bit word into the serial NOVRAM/EEPROM at 'addr':
 * enable writes, issue the write command, clock out D15..D0, then
 * disable writes again.  NVRAM_CMD/NVRAM_CLKOUT are the bit-banging
 * macros; reads of IPHASE5575_EEPROM_ACCESS poll/strobe the interface.
 */
701 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
706 * Issue a command to enable writes to the NOVRAM
708 NVRAM_CMD (EXTEND + EWEN);
711 * issue the write command
713 NVRAM_CMD(IAWRITE + addr);
715 * Send the data, starting with D15, then D14, and so on for 16 bits
717 for (i=15; i>=0; i--) {
718 NVRAM_CLKOUT (val & 0x8000);
723 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
725 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
729 * disable writes again
731 NVRAM_CMD(EXTEND + EWDS)
/*
 * Read a 16-bit word from the serial EEPROM at 'addr' by issuing the
 * read command and clocking in 16 bits (D15 first).
 * NOTE(review): the bit-accumulation body of the loop is not visible
 * in this listing.
 */
737 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
743 * Read the first bit that was clocked with the falling edge of the
744 * the last command data clock
746 NVRAM_CMD(IAREAD + addr);
748 * Now read the rest of the bits, the next bit read is D14, then D13,
752 for (i=15; i>=0; i--) {
/*
 * Determine board variant from EEPROM word 25: scale the TX/RX buffer
 * counts to the on-board packet-memory size (1M / 512K / smaller),
 * decode the front-end PHY type, and derive the line rate (25M UTP,
 * DS3, E3, or OC3 at ATM_OC3_PCR).
 * NOTE(review): some else-branches/braces are not visible here; the
 * 26/27 factor presumably removes cell-overhead framing -- confirm.
 */
761 static void ia_hw_type(IADEV *iadev) {
762 u_short memType = ia_eeprom_get(iadev, 25);
763 iadev->memType = memType;
764 if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
765 iadev->num_tx_desc = IA_TX_BUF;
766 iadev->tx_buf_sz = IA_TX_BUF_SZ;
767 iadev->num_rx_desc = IA_RX_BUF;
768 iadev->rx_buf_sz = IA_RX_BUF_SZ;
769 } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
/* Halve the default buffer counts unless the user overrode them. */
770 if (IA_TX_BUF == DFL_TX_BUFFERS)
771 iadev->num_tx_desc = IA_TX_BUF / 2;
773 iadev->num_tx_desc = IA_TX_BUF;
774 iadev->tx_buf_sz = IA_TX_BUF_SZ;
775 if (IA_RX_BUF == DFL_RX_BUFFERS)
776 iadev->num_rx_desc = IA_RX_BUF / 2;
778 iadev->num_rx_desc = IA_RX_BUF;
779 iadev->rx_buf_sz = IA_RX_BUF_SZ;
/* Smallest memory option: one eighth of the default counts. */
782 if (IA_TX_BUF == DFL_TX_BUFFERS)
783 iadev->num_tx_desc = IA_TX_BUF / 8;
785 iadev->num_tx_desc = IA_TX_BUF;
786 iadev->tx_buf_sz = IA_TX_BUF_SZ;
787 if (IA_RX_BUF == DFL_RX_BUFFERS)
788 iadev->num_rx_desc = IA_RX_BUF / 8;
790 iadev->num_rx_desc = IA_RX_BUF;
791 iadev->rx_buf_sz = IA_RX_BUF_SZ;
/* RX packet RAM starts immediately after the TX buffer region. */
793 iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
794 IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
795 iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
796 iadev->rx_buf_sz, iadev->rx_pkt_ram);)
799 if ((memType & FE_MASK) == FE_SINGLE_MODE) {
800 iadev->phy_type = PHY_OC3C_S;
801 else if ((memType & FE_MASK) == FE_UTP_OPTION)
802 iadev->phy_type = PHY_UTP155;
804 iadev->phy_type = PHY_OC3C_M;
807 iadev->phy_type = memType & FE_MASK;
808 IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
809 memType,iadev->phy_type);)
810 if (iadev->phy_type == FE_25MBIT_PHY)
811 iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
812 else if (iadev->phy_type == FE_DS3_PHY)
813 iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
814 else if (iadev->phy_type == FE_E3_PHY)
815 iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
817 iadev->LineRate = (u32)(ATM_OC3_PCR);
818 IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
/*
 * Front-end (PHY) interrupt handler: read/clear the PHY's interrupt
 * status and refresh carrier_detect for whichever PHY is fitted
 * (25 Mbit MB25, DS3/E3 PM7345, or SONET SUNI), then log carrier
 * transitions.
 */
822 static void IaFrontEndIntr(IADEV *iadev) {
823 volatile IA_SUNI *suni;
824 volatile ia_mb25_t *mb25;
825 volatile suni_pm7345_t *suni_pm7345;
829 if(iadev->phy_type & FE_25MBIT_PHY) {
830 mb25 = (ia_mb25_t*)iadev->phy;
831 iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
832 } else if (iadev->phy_type & FE_DS3_PHY) {
833 suni_pm7345 = (suni_pm7345_t *)iadev->phy;
834 /* clear FRMR interrupts */
835 frmr_intr = suni_pm7345->suni_ds3_frm_intr_stat;
/* Carrier present iff loss-of-signal bit is clear. */
836 iadev->carrier_detect =
837 Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
838 } else if (iadev->phy_type & FE_E3_PHY ) {
839 suni_pm7345 = (suni_pm7345_t *)iadev->phy;
840 frmr_intr = suni_pm7345->suni_e3_frm_maint_intr_ind;
841 iadev->carrier_detect =
842 Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
/* Default: SONET/SDH SUNI PHY. */
845 suni = (IA_SUNI *)iadev->phy;
846 intr_status = suni->suni_rsop_status & 0xff;
847 iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
849 if (iadev->carrier_detect)
850 printk("IA: SUNI carrier detected\n");
852 printk("IA: SUNI carrier lost signal\n");
/*
 * Initialise the 25 Mbit MB25 PHY: program the master control register
 * (discard-idle/error options, enable under #if not visible here),
 * clear diagnostics, and seed carrier_detect from the GSB status bit.
 */
856 static void ia_mb25_init (IADEV *iadev)
858 volatile ia_mb25_t *mb25 = (ia_mb25_t*)iadev->phy;
860 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
862 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
863 mb25->mb25_diag_control = 0;
865 * Initialize carrier detect state
867 iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
/*
 * Initialise the PM7345 S/UNI-PDH PHY for DS3 or E3 framing: program
 * framer/transmitter options, enable loss-of-signal interrupts, reset
 * counters, and configure the RX/TX cell processors (idle-cell and
 * cell-delineation patterns/masks).  Magic register values follow the
 * PM7345 datasheet; confirm against it before changing.
 */
871 static void ia_suni_pm7345_init (IADEV *iadev)
873 volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
874 if (iadev->phy_type & FE_DS3_PHY)
876 iadev->carrier_detect =
877 Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
878 suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
879 suni_pm7345->suni_ds3_frm_cfg = 1;
880 suni_pm7345->suni_ds3_tran_cfg = 1;
881 suni_pm7345->suni_config = 0;
882 suni_pm7345->suni_splr_cfg = 0;
883 suni_pm7345->suni_splt_cfg = 0;
/* E3 variant. */
887 iadev->carrier_detect =
888 Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
889 suni_pm7345->suni_e3_frm_fram_options = 0x4;
890 suni_pm7345->suni_e3_frm_maint_options = 0x20;
891 suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
892 suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
893 suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
894 suni_pm7345->suni_e3_tran_fram_options = 0x1;
895 suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
896 suni_pm7345->suni_splr_cfg = 0x41;
897 suni_pm7345->suni_splt_cfg = 0x41;
900 * Enable RSOP loss of signal interrupt.
902 suni_pm7345->suni_intr_enbl = 0x28;
905 * Clear error counters
907 suni_pm7345->suni_id_reset = 0;
910 * Clear "PMCTST" in master test register.
912 suni_pm7345->suni_master_test = 0;
914 suni_pm7345->suni_rxcp_ctrl = 0x2c;
915 suni_pm7345->suni_rxcp_fctrl = 0x81;
/* Idle-cell header pattern: all-zero header with CLP bit set (h4=1). */
917 suni_pm7345->suni_rxcp_idle_pat_h1 =
918 suni_pm7345->suni_rxcp_idle_pat_h2 =
919 suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
920 suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
922 suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
923 suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
924 suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
925 suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
927 suni_pm7345->suni_rxcp_cell_pat_h1 =
928 suni_pm7345->suni_rxcp_cell_pat_h2 =
929 suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
930 suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
932 suni_pm7345->suni_rxcp_cell_mask_h1 =
933 suni_pm7345->suni_rxcp_cell_mask_h2 =
934 suni_pm7345->suni_rxcp_cell_mask_h3 =
935 suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
937 suni_pm7345->suni_txcp_ctrl = 0xa4;
938 suni_pm7345->suni_txcp_intr_en_sts = 0x10;
939 suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
/* Clear loopback bits (list continues beyond this view). */
941 suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
946 suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
947 #endif /* __SNMP__ */
952 /***************************** IA_LIB END *****************************/
954 #ifdef CONFIG_ATM_IA_DEBUG
955 static int tcnter = 0;
/*
 * Debug hex dump: print 'length' bytes at 'cp' in 16-byte rows, each
 * prefixed by 'prefix', as hex columns followed by a printable-ASCII
 * gutter (non-printables shown as '.').
 * NOTE(review): prntBuf declaration and the count/row advance are not
 * visible in this listing.
 */
956 static void xdump( u_char* cp, int length, char* prefix )
960 u_char* pBuf = prntBuf;
962 while(count < length){
963 pBuf += sprintf( pBuf, "%s", prefix );
964 for(col = 0;count + col < length && col < 16; col++){
965 if (col != 0 && (col % 4) == 0)
966 pBuf += sprintf( pBuf, " " );
967 pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
969 while(col++ < 16){ /* pad end of buffer with blanks */
971 sprintf( pBuf, " " );
972 pBuf += sprintf( pBuf, " " );
974 pBuf += sprintf( pBuf, " " );
975 for(col = 0;count + col < length && col < 16; col++){
976 if (isprint((int)cp[count + col]))
977 pBuf += sprintf( pBuf, "%c", cp[count + col] );
979 pBuf += sprintf( pBuf, "." );
981 printk("%s\n", prntBuf);
986 } /* close xdump(... */
987 #endif /* CONFIG_ATM_IA_DEBUG */
/* Head of the linked list of probed boards. */
990 static struct atm_dev *ia_boards = NULL;
/*
 * Scale the nominal RAM base addresses by the actual control-memory
 * size (in 128K units).  These expand in a context where 'iadev' is
 * in scope.
 */
992 #define ACTUAL_RAM_BASE \
993 RAM_BASE*((iadev->mem)/(128 * 1024))
994 #define ACTUAL_SEG_RAM_BASE \
995 IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
996 #define ACTUAL_REASS_RAM_BASE \
997 IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1000 /*-- some utilities and memory allocation stuff will come here -------------*/
/*
 * Debug helper: dump the TCQ pointers (hardware and host copies), the
 * TCQ contents between start and end pointers, and the descriptor
 * table timestamps.
 */
1002 static void desc_dbg(IADEV *iadev) {
1004 u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1007 // regval = readl((u32)ia_cmds->maddr);
1008 tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
1009 printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1010 tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1011 readw(iadev->seg_ram+tcq_wr_ptr-2));
1012 printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
1014 tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
1015 tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
1016 printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1018 while (tcq_st_ptr != tcq_ed_ptr) {
1019 tmp = iadev->seg_ram+tcq_st_ptr;
1020 printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp);
1023 for(i=0; i <iadev->num_tx_desc; i++)
1024 printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1028 /*----------------------------- Receiving side stuff --------------------------*/
/*
 * Drain the reassembly exception queue.  The whole body is compiled
 * out (#if 0): servicing it caused too many exception interrupts, per
 * the original comment.
 */
1030 static void rx_excp_rcvd(struct atm_dev *dev)
1032 #if 0 /* closing the receiving size will cause too many excp int */
1035 u_short excpq_rd_ptr;
1038 iadev = INPH_IA_DEV(dev);
1039 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1040 while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
1041 { printk("state = %x \n", state);
1042 excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
1043 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
1044 if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1045 IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1046 // TODO: update exception stat
1047 vci = readw(iadev->reass_ram+excpq_rd_ptr);
1048 error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
1051 if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
1052 excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1053 writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
1054 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
/*
 * Return RX descriptor 'desc' to the hardware free-descriptor queue,
 * advancing (and wrapping) the FDQ write pointer.
 */
1059 static void free_desc(struct atm_dev *dev, int desc)
1062 iadev = INPH_IA_DEV(dev);
1063 writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1064 iadev->rfL.fdq_wr +=2;
1065 if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1066 iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
1067 writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
/*
 * Pull one completed packet descriptor off the Packet Complete Queue:
 * validate the descriptor and owning VCC, check the hardware status
 * bits, size the PDU from the buffer/DMA addresses, allocate an skb
 * (atm_alloc_charge), and enqueue a DLE to DMA the payload from board
 * packet RAM into the skb.  On any error the descriptor is recycled
 * via free_desc.
 * NOTE(review): several returns, goto targets and closing braces are
 * not visible in this listing.
 */
1071 static int rx_pkt(struct atm_dev *dev)
1074 struct atm_vcc *vcc;
1075 unsigned short status;
1076 struct rx_buf_desc __iomem *buf_desc_ptr;
1080 struct sk_buff *skb;
1081 u_int buf_addr, dma_addr;
1083 iadev = INPH_IA_DEV(dev);
1084 if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
1086 printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
1089 /* mask 1st 3 bits to get the actual descno. */
1090 desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
1091 IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
1092 iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1093 printk(" pcq_wr_ptr = 0x%x\n",
1094 readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1095 /* update the read pointer - maybe we shud do this in the end*/
1096 if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
1097 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
1099 iadev->rfL.pcq_rd += 2;
1100 writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);
1102 /* get the buffer desc entry.
1103 update stuff. - doesn't seem to be any update necessary
1105 buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1106 /* make the ptr point to the corresponding buffer desc entry */
1107 buf_desc_ptr += desc;
/* Sanity: descriptor number and VC index must be in range. */
1108 if (!desc || (desc > iadev->num_rx_desc) ||
1109 ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
1110 free_desc(dev, desc);
1111 IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1114 vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
1117 free_desc(dev, desc);
1118 printk("IA: null vcc, drop PDU\n");
1123 /* might want to check the status bits for errors */
1124 status = (u_short) (buf_desc_ptr->desc_mode);
1125 if (status & (RX_CER | RX_PTE | RX_OFL))
1127 atomic_inc(&vcc->stats->rx_err);
1128 IF_ERR(printk("IA: bad packet, dropping it");)
1129 if (status & RX_CER) {
1130 IF_ERR(printk(" cause: packet CRC error\n");)
1132 else if (status & RX_PTE) {
1133 IF_ERR(printk(" cause: packet time out\n");)
1136 IF_ERR(printk(" cause: buffer overflow\n");)
/* PDU length = DMA end address minus buffer start address. */
1145 buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
1146 dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
1147 len = dma_addr - buf_addr;
1148 if (len > iadev->rx_buf_sz) {
1149 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1150 atomic_inc(&vcc->stats->rx_err);
1154 if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1156 printk("Drop control packets\n");
1161 ATM_SKB(skb)->vcc = vcc;
1162 ATM_DESC(skb) = desc;
1163 skb_queue_tail(&iadev->rx_dma_q, skb);
1165 /* Build the DLE structure */
1166 wr_ptr = iadev->rx_dle_q.write;
1167 wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1168 len, PCI_DMA_FROMDEVICE);
1169 wr_ptr->local_pkt_addr = buf_addr;
1170 wr_ptr->bytes = len; /* We don't know this do we ?? */
1171 wr_ptr->mode = DMA_INT_ENABLE;
1173 /* shud take care of wrap around here too. */
1174 if(++wr_ptr == iadev->rx_dle_q.end)
1175 wr_ptr = iadev->rx_dle_q.start;
1176 iadev->rx_dle_q.write = wr_ptr;
1178 /* Increment transaction counter */
1179 writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
/* Error path: recycle the descriptor. */
1182 free_desc(dev, desc);
/*
 * Reassembly-side interrupt handler: dispatch on the REASS interrupt
 * status bits -- packet received (drain the PCQ via rx_pkt), free
 * queue empty (stall-recovery heuristic), exception received, and raw
 * cell received.
 * NOTE(review): several braces and the per-status handler tails are
 * not visible in this listing.
 */
1186 static void rx_intr(struct atm_dev *dev)
1192 iadev = INPH_IA_DEV(dev);
1193 status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
1194 IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1195 if (status & RX_PKT_RCVD)
1198 /* Basically recvd an interrupt for receving a packet.
1199 A descriptor would have been written to the packet complete
1200 queue. Get all the descriptors and set up dma to move the
1201 packets till the packet complete queue is empty..
1203 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1204 IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
1205 while(!(state & PCQ_EMPTY))
1208 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1212 if (status & RX_FREEQ_EMPT)
/* First stall: snapshot the packet count and time. */
1215 iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1216 iadev->rx_tmp_jif = jiffies;
/* Still stalled after ~50 jiffies with no progress: recover. */
1219 else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1220 ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1221 for (i = 1; i <= iadev->num_rx_desc; i++)
1223 printk("Test logic RUN!!!!\n");
1224 writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1227 IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
1230 if (status & RX_EXCP_RCVD)
1232 /* probably need to handle the exception queue also. */
1233 IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
1238 if (status & RX_RAW_RCVD)
1240 /* need to handle the raw incoming cells. This depends on
1241 whether we have programmed to receive the raw cells or not.
1243 IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
1248 static void rx_dle_intr(struct atm_dev *dev)
/* Receive DLE (DMA list entry) completion handler.  Walks the rx DLE
 * ring from our read pointer up to the adapter's current list pointer,
 * and for every completed DMA: unmaps the buffer, validates the AAL5
 * trailer length, and pushes the skb up to the ATM layer. */
1251 struct atm_vcc *vcc;
1252 struct sk_buff *skb;
1255 struct dle *dle, *cur_dle;
1258 iadev = INPH_IA_DEV(dev);
1260 /* free all the dles done, that is just update our own dle read pointer
1261 - do we really need to do this. Think not. */
1262 /* DMA is done, just get all the receive buffers from the rx dma queue
1263 and push them up to the higher layer protocol. Also free the desc
1264 associated with the buffer. */
1265 dle = iadev->rx_dle_q.read;
/* The adapter's list address register holds the hardware's position in
 * the ring; mask it down to an offset within the DLE area. */
1266 dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
1267 cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
1268 while(dle != cur_dle)
1270 /* free the DMAed skb */
1271 skb = skb_dequeue(&iadev->rx_dma_q);
1274 desc = ATM_DESC(skb);
1275 free_desc(dev, desc);
/* Zero-length skbs carry no payload — drop them outright. */
1277 if (!(len = skb->len))
1279 printk("rx_dle_intr: skb len 0\n");
1280 dev_kfree_skb_any(skb);
1284 struct cpcs_trailer *trailer;
1286 struct ia_vcc *ia_vcc;
1288 pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1289 len, PCI_DMA_FROMDEVICE);
1290 /* no VCC related housekeeping done as yet. lets see */
1291 vcc = ATM_SKB(skb)->vcc;
1293 printk("IA: null vcc\n");
1294 dev_kfree_skb_any(skb);
1297 ia_vcc = INPH_IA_VCC(vcc);
1300 atomic_inc(&vcc->stats->rx_err);
1301 dev_kfree_skb_any(skb);
1302 atm_return(vcc, atm_guess_pdu2truesize(len));
/* The real PDU length lives in the AAL5 CPCS trailer at the end of the
 * buffer; it is byte-swapped relative to host order here. */
1305 // get real pkt length pwang_test
1306 trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1307 skb->len - sizeof(*trailer));
1308 length = swap_byte_order(trailer->length);
/* Sanity-check the trailer length against the buffer size and the
 * actual amount of data DMAed; a bad trailer means a corrupt PDU. */
1309 if ((length > iadev->rx_buf_sz) || (length >
1310 (skb->len - sizeof(struct cpcs_trailer))))
1312 atomic_inc(&vcc->stats->rx_err);
1313 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
1315 dev_kfree_skb_any(skb);
1316 atm_return(vcc, atm_guess_pdu2truesize(len));
1319 skb_trim(skb, length);
1321 /* Display the packet */
1322 IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
1323 xdump(skb->data, skb->len, "RX: ");
1326 IF_RX(printk("rx_dle_intr: skb push");)
1328 atomic_inc(&vcc->stats->rx);
1329 iadev->rx_pkt_cnt++;
/* Advance with wrap-around, then publish the new read pointer. */
1332 if (++dle == iadev->rx_dle_q.end)
1333 dle = iadev->rx_dle_q.start;
1335 iadev->rx_dle_q.read = dle;
1337 /* if the interrupts are masked because there were no free desc available,
/* ...and descriptors have since been freed, unmask the receive
 * interrupts again so reception can resume. */
1339 if (!iadev->rxing) {
1340 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1341 if (!(state & FREEQ_EMPTY)) {
1342 state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1343 writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1344 iadev->reass_reg+REASS_MASK_REG);
1351 static int open_rx(struct atm_vcc *vcc)
/* Open the receive side of a VCC: validates the traffic class against
 * the PHY, marks the VCI valid in the on-board VC lookup table, sets up
 * ABR parameters or the reassembly table entry as appropriate, and
 * records the vcc in iadev->rx_open[] so rx interrupts can find it. */
1354 u_short __iomem *vc_table;
1355 u_short __iomem *reass_ptr;
1356 IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
/* Nothing to do when the caller requested no receive traffic. */
1358 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1359 iadev = INPH_IA_DEV(vcc->dev);
/* The 25 Mbit PHY variants do not support ABR. */
1360 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1361 if (iadev->phy_type & FE_25MBIT_PHY) {
1362 printk("IA: ABR not support\n");
1366 /* Make only this VCI in the vc table valid and let all
1367 others be invalid entries */
1368 vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1369 vc_table += vcc->vci;
1370 /* mask the last 6 bits and OR it with 3 for 1K VCs */
1372 *vc_table = vcc->vci << 6;
1373 /* Also keep a list of open rx vcs so that we can attach them with
1374 incoming PDUs later. */
1375 if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1376 (vcc->qos.txtp.traffic_class == ATM_ABR))
1378 srv_cls_param_t srv_p;
1379 init_abr_vc(iadev, &srv_p);
1380 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1382 else { /* for UBR later may need to add CBR logic */
1383 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1384 reass_ptr += vcc->vci;
1385 *reass_ptr = NO_AAL5_PKT;
/* Warn (but do not fail) if this VCI already has an open rx vcc —
 * the new vcc simply replaces the old entry. */
1388 if (iadev->rx_open[vcc->vci])
1389 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
1390 vcc->dev->number, vcc->vci);
1391 iadev->rx_open[vcc->vci] = vcc;
1395 static int rx_init(struct atm_dev *dev)
/* One-time initialization of the receive (reassembly) side: allocates
 * the DLE ring, programs the reassembly control-memory layout (buffer
 * descriptors, free queue, packet-complete queue, exception queue,
 * reassembly/VC/ABR tables), sets timeouts and masks, and allocates
 * the rx_open[] vcc lookup array.  Returns 0 on success, negative on
 * allocation failure (error paths elided in this view). */
1398 struct rx_buf_desc __iomem *buf_desc_ptr;
1399 unsigned long rx_pkt_start = 0;
1401 struct abr_vc_table *abr_vc_table;
1404 int i,j, vcsize_sel;
1405 u_short freeq_st_adr;
1406 u_short *freeq_start;
1408 iadev = INPH_IA_DEV(dev);
1409 // spin_lock_init(&iadev->rx_lock);
1411 /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1412 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1413 &iadev->rx_dle_dma);
1415 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1418 iadev->rx_dle_q.start = (struct dle *)dle_addr;
1419 iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1420 iadev->rx_dle_q.write = iadev->rx_dle_q.start;
1421 iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1422 /* the end of the dle q points to the entry after the last
1423 DLE that can be used. */
1425 /* write the upper 20 bits of the start address to rx list address register */
1426 /* We know this is 32bit bus addressed so the following is safe */
1427 writel(iadev->rx_dle_dma & 0xfffff000,
1428 iadev->dma + IPHASE5575_RX_LIST_ADDR);
1429 IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1430 iadev->dma+IPHASE5575_TX_LIST_ADDR,
1431 *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
1432 printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1433 iadev->dma+IPHASE5575_RX_LIST_ADDR,
1434 *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
/* Mask all reassembly interrupts while we reset and reprogram. */
1436 writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1437 writew(0, iadev->reass_reg+MODE_REG);
1438 writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1440 /* Receive side control memory map
1441 -------------------------------
1443 Buffer descr 0x0000 (736 - 23K)
1444 VP Table 0x5c00 (256 - 512)
1445 Except q 0x5e00 (128 - 512)
1446 Free buffer q 0x6000 (1K - 2K)
1447 Packet comp q 0x6800 (1K - 2K)
1448 Reass Table 0x7000 (1K - 2K)
1449 VC Table 0x7800 (1K - 2K)
1450 ABR VC Table 0x8000 (1K - 32K)
1453 /* Base address for Buffer Descriptor Table */
1454 writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1455 /* Set the buffer size register */
1456 writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1458 /* Initialize each entry in the Buffer Descriptor Table */
1459 iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1460 buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1461 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1463 rx_pkt_start = iadev->rx_pkt_ram;
/* Descriptor numbering starts at 1; each entry records the 32-bit
 * packet-RAM start address of its buffer, split hi/lo 16 bits. */
1464 for(i=1; i<=iadev->num_rx_desc; i++)
1466 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1467 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1468 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1470 rx_pkt_start += iadev->rx_buf_sz;
1472 IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1473 i = FREE_BUF_DESC_Q*iadev->memSize;
1474 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
1475 writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1476 writew(i+iadev->num_rx_desc*sizeof(u_short),
1477 iadev->reass_reg+FREEQ_ED_ADR);
1478 writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1479 writew(i+iadev->num_rx_desc*sizeof(u_short),
1480 iadev->reass_reg+FREEQ_WR_PTR);
1481 /* Fill the FREEQ with all the free descriptors. */
1482 freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1483 freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1484 for(i=1; i<=iadev->num_rx_desc; i++)
1486 *freeq_start = (u_short)i;
1489 IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1490 /* Packet Complete Queue */
1491 i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1492 writew(i, iadev->reass_reg+PCQ_ST_ADR);
1493 writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1494 writew(i, iadev->reass_reg+PCQ_RD_PTR);
1495 writew(i, iadev->reass_reg+PCQ_WR_PTR);
1497 /* Exception Queue */
1498 i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1499 writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1500 writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
1501 iadev->reass_reg+EXCP_Q_ED_ADR);
1502 writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1503 writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1505 /* Load local copy of FREEQ and PCQ ptrs */
1506 iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1507 iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1508 iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1509 iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1510 iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1511 iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1512 iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1513 iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1515 IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1516 iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1517 iadev->rfL.pcq_wr);)
1518 /* just for check - no VP TBL */
1520 /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1521 /* initialize VP Table for invalid VPIs
1522 - I guess we can write all 1s or 0x000f in the entire memory
1523 space or something similar.
1526 /* This seems to work and looks right to me too !!! */
1527 i = REASS_TABLE * iadev->memSize;
1528 writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1529 /* initialize Reassembly table to I don't know what ???? */
1530 reass_table = (u16 *)(iadev->reass_ram+i);
1531 j = REASS_TABLE_SZ * iadev->memSize;
1532 for(i=0; i < j; i++)
1533 *reass_table++ = NO_AAL5_PKT;
1536 while (i != iadev->num_vc) {
1540 i = RX_VC_TABLE * iadev->memSize;
1541 writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1542 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1543 j = RX_VC_TABLE_SZ * iadev->memSize;
1544 for(i = 0; i < j; i++)
1546 /* shift the reassembly pointer by 3 + lower 3 bits of
1547 vc_lkup_base register (=3 for 1K VCs) and the last byte
1548 is those low 3 bits.
1549 Shall program this later.
1551 *vc_table = (i << 6) | 15; /* for invalid VCI */
1555 i = ABR_VC_TABLE * iadev->memSize;
1556 writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1558 i = ABR_VC_TABLE * iadev->memSize;
1559 abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
1560 j = REASS_TABLE_SZ * iadev->memSize;
1561 memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
/* Default ABR parameters per entry: rdf/air values used until a VC is
 * actually opened — meaning of the magic constants is hardware
 * specific; see the (i)Chip SAR documentation. */
1562 for(i = 0; i < j; i++) {
1563 abr_vc_table->rdf = 0x0003;
1564 abr_vc_table->air = 0x5eb1;
1568 /* Initialize other registers */
1570 /* VP Filter Register set for VC Reassembly only */
1571 writew(0xff00, iadev->reass_reg+VP_FILTER);
1572 writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1573 writew(0x1, iadev->reass_reg+PROTOCOL_ID);
1575 /* Packet Timeout Count related Registers :
1576 Set packet timeout to occur in about 3 seconds
1577 Set Packet Aging Interval count register to overflow in about 4 us
1579 writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1581 i = (j >> 6) & 0xFF;
1583 i |= ((j << 2) & 0xFF00);
1584 writew(i, iadev->reass_reg+TMOUT_RANGE);
1586 /* initiate the desc_tble */
1587 for(i=0; i<iadev->num_tx_desc;i++)
1588 iadev->desc_tbl[i].timestamp = 0;
1590 /* to clear the interrupt status register - read it */
1591 readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1593 /* Mask Register - clear it */
1594 writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1596 skb_queue_head_init(&iadev->rx_dma_q);
1597 iadev->rx_free_desc_qhead = NULL;
/* One vcc pointer per VC for rx lookup; 4 == sizeof(pointer) on the
 * 32-bit platforms this assumed — TODO confirm on 64-bit. */
1599 iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1600 if (!iadev->rx_open) {
1601 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1607 iadev->rx_pkt_cnt = 0;
1609 writew(R_ONLINE, iadev->reass_reg+MODE_REG);
1613 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1621 The memory map suggested in appendix A and the coding for it.
1622 Keeping it around just in case we change our mind later.
1624 Buffer descr 0x0000 (128 - 4K)
1625 UBR sched 0x1000 (1K - 4K)
1626 UBR Wait q 0x2000 (1K - 4K)
1627 Commn queues 0x3000 Packet Ready, Transmit comp(0x3100)
1629 extended VC 0x4000 (1K - 8K)
1630 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1631 CBR sched 0x7000 (as needed)
1632 VC table 0x8000 (1K - 32K)
1635 static void tx_intr(struct atm_dev *dev)
/* Segmentation-side (transmit) interrupt handler: acknowledges
 * TRANSMIT_DONE under the tx lock, wakes any close waiter, and logs
 * TCQ_NOT_EMPTY events. */
1638 unsigned short status;
1639 unsigned long flags;
1641 iadev = INPH_IA_DEV(dev);
1643 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1644 if (status & TRANSMIT_DONE){
1646 IF_EVENT(printk("Tansmit Done Intr logic run\n");)
1647 spin_lock_irqsave(&iadev->tx_lock, flags);
1649 spin_unlock_irqrestore(&iadev->tx_lock, flags);
/* Write-1-to-clear the TRANSMIT_DONE status bit. */
1650 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1651 if (iadev->close_pending)
1652 wake_up(&iadev->close_wait);
1654 if (status & TCQ_NOT_EMPTY)
1656 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1660 static void tx_dle_intr(struct atm_dev *dev)
/* Transmit DLE (DMA list entry) completion handler.  Walks the tx DLE
 * ring from our read pointer to the adapter's current position,
 * unmapping and releasing each DMAed skb.  Rate-limited VCs keep their
 * skbs on a per-VC queue (flow control) instead of freeing them here.
 * Runs entirely under iadev->tx_lock. */
1663 struct dle *dle, *cur_dle;
1664 struct sk_buff *skb;
1665 struct atm_vcc *vcc;
1666 struct ia_vcc *iavcc;
1668 unsigned long flags;
1670 iadev = INPH_IA_DEV(dev);
1671 spin_lock_irqsave(&iadev->tx_lock, flags);
1672 dle = iadev->tx_dle_q.read;
1673 dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
1674 (sizeof(struct dle)*DLE_ENTRIES - 1);
1675 cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1676 while (dle != cur_dle)
1678 /* free the DMAed skb */
1679 skb = skb_dequeue(&iadev->tx_dma_q);
1682 /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1683 if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1684 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1687 vcc = ATM_SKB(skb)->vcc;
1689 printk("tx_dle_intr: vcc is null\n");
1690 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1691 dev_kfree_skb_any(skb);
1695 iavcc = INPH_IA_VCC(vcc);
1697 printk("tx_dle_intr: iavcc is null\n");
1698 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1699 dev_kfree_skb_any(skb);
/* Fast (non-rate-limited) VCs: release the skb now, via the vcc's
 * pop callback when one is registered. */
1702 if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1703 if ((vcc->pop) && (skb->len != 0))
1708 dev_kfree_skb_any(skb);
1711 else { /* Hold the rate-limited skb for flow control */
1712 IA_SKB_STATE(skb) |= IA_DLED;
1713 skb_queue_tail(&iavcc->txing_skb, skb);
1715 IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
/* Advance with wrap-around, then publish the new read pointer. */
1716 if (++dle == iadev->tx_dle_q.end)
1717 dle = iadev->tx_dle_q.start;
1719 iadev->tx_dle_q.read = dle;
1720 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1723 static int open_tx(struct atm_vcc *vcc)
/* Open the transmit side of a VCC: validates the traffic class against
 * the PHY and configured SDU size, normalizes the requested PCR against
 * the line rate, sizes the socket send buffer for rate-limited VCs, and
 * programs the main/extended VC table entries for UBR, ABR or CBR.
 * Returns 0 for ATM_NONE, otherwise the (elided) success/error code.
 * Fix: the "Non UBR, ABR and CBR" printk ended in "supportedn" — the
 * backslash of the intended "\n" newline escape was missing. */
1725 struct ia_vcc *ia_vcc;
1730 IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1731 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1732 iadev = INPH_IA_DEV(vcc->dev);
/* The 25 Mbit PHY variants support neither ABR nor CBR. */
1734 if (iadev->phy_type & FE_25MBIT_PHY) {
1735 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1736 printk("IA: ABR not support\n");
1739 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1740 printk("IA: CBR not support\n");
1744 ia_vcc = INPH_IA_VCC(vcc);
1745 memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
/* Reject SDUs that cannot fit a tx buffer with its CPCS trailer. */
1746 if (vcc->qos.txtp.max_sdu >
1747 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1748 printk("IA: SDU size over (%d) the configured SDU size %d\n",
1749 vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1750 vcc->dev_data = NULL;
1754 ia_vcc->vc_desc_cnt = 0;
/* Normalize the peak cell rate: unspecified/unbounded requests get
 * the full line rate, and nothing may exceed it. */
1758 if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1759 vcc->qos.txtp.pcr = iadev->LineRate;
1760 else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1761 vcc->qos.txtp.pcr = iadev->LineRate;
1762 else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1763 vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1764 if (vcc->qos.txtp.pcr > iadev->LineRate)
1765 vcc->qos.txtp.pcr = iadev->LineRate;
1766 ia_vcc->pcr = vcc->qos.txtp.pcr;
/* Pick a close-timeout proportional to how slow the VC drains. */
1768 if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1769 else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1770 else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1771 else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
1772 if (ia_vcc->pcr < iadev->rate_limit)
1773 skb_queue_head_init (&ia_vcc->txing_skb);
/* Rate-limited VCs: shrink the socket send buffer so the flow-control
 * queue in tx_dle_intr() cannot grow without bound. */
1774 if (ia_vcc->pcr < iadev->rate_limit) {
1775 struct sock *sk = sk_atm(vcc);
1777 if (vcc->qos.txtp.max_sdu != 0) {
1778 if (ia_vcc->pcr > 60000)
1779 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1780 else if (ia_vcc->pcr > 2000)
1781 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1783 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1786 sk->sk_sndbuf = 24576;
1789 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1790 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1793 memset((caddr_t)vc, 0, sizeof(*vc));
1794 memset((caddr_t)evc, 0, sizeof(*evc));
1796 /* store the most significant 4 bits of vci as the last 4 bits
1797 of first part of atm header.
1798 store the last 12 bits of vci as first 12 bits of the second
1799 part of the atm header.
1801 evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1802 evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1804 /* check the following for different traffic classes */
1805 if (vcc->qos.txtp.traffic_class == ATM_UBR)
1808 vc->status = CRC_APPEND;
1809 vc->acr = cellrate_to_float(iadev->LineRate);
1810 if (vcc->qos.txtp.pcr > 0)
1811 vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1812 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1813 vcc->qos.txtp.max_pcr,vc->acr);)
1815 else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1816 { srv_cls_param_t srv_p;
1817 IF_ABR(printk("Tx ABR VCC\n");)
/* Start from defaults, then override with every ABR parameter the
 * caller supplied; MCR is admission-checked against the line rate. */
1818 init_abr_vc(iadev, &srv_p);
1819 if (vcc->qos.txtp.pcr > 0)
1820 srv_p.pcr = vcc->qos.txtp.pcr;
1821 if (vcc->qos.txtp.min_pcr > 0) {
1822 int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1823 if (tmpsum > iadev->LineRate)
1825 srv_p.mcr = vcc->qos.txtp.min_pcr;
1826 iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1829 if (vcc->qos.txtp.icr)
1830 srv_p.icr = vcc->qos.txtp.icr;
1831 if (vcc->qos.txtp.tbe)
1832 srv_p.tbe = vcc->qos.txtp.tbe;
1833 if (vcc->qos.txtp.frtt)
1834 srv_p.frtt = vcc->qos.txtp.frtt;
1835 if (vcc->qos.txtp.rif)
1836 srv_p.rif = vcc->qos.txtp.rif;
1837 if (vcc->qos.txtp.rdf)
1838 srv_p.rdf = vcc->qos.txtp.rdf;
1839 if (vcc->qos.txtp.nrm_pres)
1840 srv_p.nrm = vcc->qos.txtp.nrm;
1841 if (vcc->qos.txtp.trm_pres)
1842 srv_p.trm = vcc->qos.txtp.trm;
1843 if (vcc->qos.txtp.adtf_pres)
1844 srv_p.adtf = vcc->qos.txtp.adtf;
1845 if (vcc->qos.txtp.cdf_pres)
1846 srv_p.cdf = vcc->qos.txtp.cdf;
/* ICR must never exceed PCR. */
1847 if (srv_p.icr > srv_p.pcr)
1848 srv_p.icr = srv_p.pcr;
1849 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1850 srv_p.pcr, srv_p.mcr);)
1851 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1852 } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1853 if (iadev->phy_type & FE_25MBIT_PHY) {
1854 printk("IA: CBR not support\n");
1857 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1858 IF_CBR(printk("PCR is not availble\n");)
1862 vc->status = CRC_APPEND;
1863 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
1868 printk("iadev: Non UBR, ABR and CBR traffic not supported\n");
1870 iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1871 IF_EVENT(printk("ia open_tx returning \n");)
1876 static int tx_init(struct atm_dev *dev)
/* One-time initialization of the transmit (segmentation) side:
 * allocates the tx DLE ring, CPCS-trailer buffers and bookkeeping
 * tables, programs the segmentation control-memory layout (buffer
 * descriptors, TCQ/PRQ, CBR/UBR/ABR scheduling tables, VC tables) and
 * the rate/mode registers, then brings the segmentation engine online.
 * Returns 0 on success; error paths unwind via the goto labels below. */
1879 struct tx_buf_desc *buf_desc_ptr;
1880 unsigned int tx_pkt_start;
1892 iadev = INPH_IA_DEV(dev);
1893 spin_lock_init(&iadev->tx_lock);
1895 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1896 readw(iadev->seg_reg+SEG_MASK_REG));)
1898 /* Allocate 4k (boundary aligned) bytes */
1899 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1900 &iadev->tx_dle_dma);
1902 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1905 iadev->tx_dle_q.start = (struct dle*)dle_addr;
1906 iadev->tx_dle_q.read = iadev->tx_dle_q.start;
1907 iadev->tx_dle_q.write = iadev->tx_dle_q.start;
1908 iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1910 /* write the upper 20 bits of the start address to tx list address register */
1911 writel(iadev->tx_dle_dma & 0xfffff000,
1912 iadev->dma + IPHASE5575_TX_LIST_ADDR);
/* Mask all segmentation interrupts while we reset and reprogram. */
1913 writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
1914 writew(0, iadev->seg_reg+MODE_REG_0);
1915 writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
1916 iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1917 iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1918 iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1921 Transmit side control memory map
1922 --------------------------------
1923 Buffer descr 0x0000 (128 - 4K)
1924 Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
1927 CBR Table 0x1800 (as needed) - 6K
1928 UBR Table 0x3000 (1K - 4K) - 12K
1929 UBR Wait queue 0x4000 (1K - 4K) - 16K
1930 ABR sched 0x5000 and ABR wait queue (1K - 2K) each
1931 ABR Tbl - 20K, ABR Wq - 22K
1932 extended VC 0x6000 (1K - 8K) - 24K
1933 VC Table 0x8000 (1K - 32K) - 32K
1935 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
1936 and Wait q, which can be allotted later.
1939 /* Buffer Descriptor Table Base address */
1940 writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1942 /* initialize each entry in the buffer descriptor table */
1943 buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
1944 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1946 tx_pkt_start = TX_PACKET_RAM;
/* Descriptor numbering starts at 1; each entry is AAL5 mode and
 * records its packet-RAM buffer start address, split hi/lo 16 bits. */
1947 for(i=1; i<=iadev->num_tx_desc; i++)
1949 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1950 buf_desc_ptr->desc_mode = AAL5;
1951 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1952 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1954 tx_pkt_start += iadev->tx_buf_sz;
1956 iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1957 if (!iadev->tx_buf) {
1958 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
/* One DMA-able CPCS trailer buffer per tx descriptor. */
1961 for (i= 0; i< iadev->num_tx_desc; i++)
1963 struct cpcs_trailer *cpcs;
1965 cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1967 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
1968 goto err_free_tx_bufs;
1970 iadev->tx_buf[i].cpcs = cpcs;
1971 iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1972 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1974 iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1975 sizeof(struct desc_tbl_t), GFP_KERNEL);
1976 if (!iadev->desc_tbl) {
1977 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1978 goto err_free_all_tx_bufs;
1981 /* Communication Queues base address */
1982 i = TX_COMP_Q * iadev->memSize;
1983 writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
1985 /* Transmit Complete Queue */
1986 writew(i, iadev->seg_reg+TCQ_ST_ADR);
1987 writew(i, iadev->seg_reg+TCQ_RD_PTR);
1988 writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
1989 iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1990 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
1991 iadev->seg_reg+TCQ_ED_ADR);
1992 /* Fill the TCQ with all the free descriptors. */
1993 tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
1994 tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
1995 for(i=1; i<=iadev->num_tx_desc; i++)
1997 *tcq_start = (u_short)i;
2001 /* Packet Ready Queue */
2002 i = PKT_RDY_Q * iadev->memSize;
2003 writew(i, iadev->seg_reg+PRQ_ST_ADR);
2004 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2005 iadev->seg_reg+PRQ_ED_ADR);
2006 writew(i, iadev->seg_reg+PRQ_RD_PTR);
2007 writew(i, iadev->seg_reg+PRQ_WR_PTR);
2009 /* Load local copy of PRQ and TCQ ptrs */
2010 iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2011 iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2012 iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2014 iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2015 iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2016 iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2018 /* Just for safety initializing the queue to have desc 1 always */
2019 /* Fill the PRQ with all the free descriptors. */
2020 prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
2021 prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
2022 for(i=1; i<=iadev->num_tx_desc; i++)
2024 *prq_start = (u_short)0; /* desc 1 in all entries */
2028 IF_INIT(printk("Start CBR Init\n");)
2029 #if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
2030 writew(0,iadev->seg_reg+CBR_PTR_BASE);
2031 #else /* Charlie's logic is wrong ? */
2032 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2033 IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2034 writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2037 IF_INIT(printk("value in register = 0x%x\n",
2038 readw(iadev->seg_reg+CBR_PTR_BASE));)
2039 tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2040 writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2041 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2042 readw(iadev->seg_reg+CBR_TAB_BEG));)
2043 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2044 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2045 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2046 IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2047 iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2048 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2049 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2050 readw(iadev->seg_reg+CBR_TAB_END+1));)
2052 /* Initialize the CBR Scheduling Table */
2053 memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
2054 0, iadev->num_vc*6);
2055 iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2056 iadev->CbrEntryPt = 0;
2057 iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2058 iadev->NumEnabledCBR = 0;
2060 /* UBR scheduling Table and wait queue */
2061 /* initialize all bytes of UBR scheduler table and wait queue to 0
2062 - SCHEDSZ is 1K (# of entries).
2063 - UBR Table size is 4K
2064 - UBR wait queue is 4K
2065 since the table and wait queues are contiguous, all the bytes
2066 can be initialized by one memset.
2071 while (i != iadev->num_vc) {
2076 i = MAIN_VC_TABLE * iadev->memSize;
2077 writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2078 i = EXT_VC_TABLE * iadev->memSize;
2079 writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2080 i = UBR_SCHED_TABLE * iadev->memSize;
2081 writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
2082 i = UBR_WAIT_Q * iadev->memSize;
2083 writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
2084 memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2085 0, iadev->num_vc*8);
2086 /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2087 /* initialize all bytes of ABR scheduler table and wait queue to 0
2088 - SCHEDSZ is 1K (# of entries).
2089 - ABR Table size is 2K
2090 - ABR wait queue is 2K
2091 since the table and wait queues are contiguous, all the bytes
2092 can be initialized by one memset.
2094 i = ABR_SCHED_TABLE * iadev->memSize;
2095 writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2096 i = ABR_WAIT_Q * iadev->memSize;
2097 writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2099 i = ABR_SCHED_TABLE*iadev->memSize;
2100 memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
2101 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
2102 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
2103 iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
2104 if (!iadev->testTable) {
2105 printk("Get freepage failed\n");
2106 goto err_free_desc_tbl;
/* Per-VC bookkeeping: zero the on-board VC entries and allocate the
 * host-side testTable entry, defaulting every VC to UBR. */
2108 for(i=0; i<iadev->num_vc; i++)
2110 memset((caddr_t)vc, 0, sizeof(*vc));
2111 memset((caddr_t)evc, 0, sizeof(*evc));
2112 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2114 if (!iadev->testTable[i])
2115 goto err_free_test_tables;
2116 iadev->testTable[i]->lastTime = 0;
2117 iadev->testTable[i]->fract = 0;
2118 iadev->testTable[i]->vc_status = VC_UBR;
2123 /* Other Initialization */
2125 /* Max Rate Register */
2126 if (iadev->phy_type & FE_25MBIT_PHY) {
2127 writew(RATE25, iadev->seg_reg+MAXRATE);
2128 writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2131 writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2132 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2134 /* Set Idle Header Registers to be sure */
2135 writew(0, iadev->seg_reg+IDLEHEADHI);
2136 writew(0, iadev->seg_reg+IDLEHEADLO);
2138 /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
2139 writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
2141 iadev->close_pending = 0;
2142 init_waitqueue_head(&iadev->close_wait);
2143 init_waitqueue_head(&iadev->timeout_wait);
2144 skb_queue_head_init(&iadev->tx_dma_q);
2145 ia_init_rtn_q(&iadev->tx_return_q);
2147 /* RM Cell Protocol ID and Message Type */
2148 writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
2149 skb_queue_head_init (&iadev->tx_backlog);
2151 /* Mode Register 1 */
2152 writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
2154 /* Mode Register 0 */
2155 writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
2157 /* Interrupt Status Register - read to clear */
2158 readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
2160 /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2161 writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2162 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2163 iadev->tx_pkt_cnt = 0;
2164 iadev->rate_limit = iadev->LineRate / 3;
/* Error unwinding: free everything acquired above, in reverse order. */
2168 err_free_test_tables:
2170 kfree(iadev->testTable[i]);
2171 kfree(iadev->testTable);
2173 kfree(iadev->desc_tbl);
2174 err_free_all_tx_bufs:
2175 i = iadev->num_tx_desc;
2178 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2180 pci_unmap_single(iadev->pci, desc->dma_addr,
2181 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2184 kfree(iadev->tx_buf);
2186 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2192 static irqreturn_t ia_int(int irq, void *dev_id)
/* Top-level interrupt handler.  Loops while the bus status register
 * reports pending conditions, dispatching to the reassembly (rx),
 * rx-DLE, segmentation (tx), tx-DLE and front-end sub-handlers.
 * Returns IRQ_HANDLED/IRQ_NONE via the handled flag. */
2194 struct atm_dev *dev;
2196 unsigned int status;
2200 iadev = INPH_IA_DEV(dev);
2201 while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2204 IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2205 if (status & STAT_REASSINT)
2208 IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2211 if (status & STAT_DLERINT)
2213 /* Clear this bit by writing a 1 to it. */
2214 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2217 if (status & STAT_SEGINT)
2220 IF_EVENT(printk("IA: tx_intr \n");)
/* STAT_DLETINT is also write-1-to-clear. */
2223 if (status & STAT_DLETINT)
2225 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
2228 if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2230 if (status & STAT_FEINT)
2231 IaFrontEndIntr(iadev);
2234 return IRQ_RETVAL(handled);
2239 /*----------------------------- entries --------------------------------*/
2240 static int get_esi(struct atm_dev *dev)
/* Read the End System Identifier (MAC address) from the adapter's
 * MAC1/MAC2 registers and unpack it byte-by-byte, most significant
 * first, into dev->esi[]. */
2247 iadev = INPH_IA_DEV(dev);
2248 mac1 = cpu_to_be32(le32_to_cpu(readl(
2249 iadev->reg+IPHASE5575_MAC1)));
2250 mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2251 IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2252 for (i=0; i<MAC1_LEN; i++)
2253 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2255 for (i=0; i<MAC2_LEN; i++)
2256 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
/*
 * reset_sar - hard-reset the SAR chip while preserving PCI config space.
 * The external reset clobbers the device's PCI configuration, so all 64
 * dwords are saved first, the reset is pulsed via IPHASE5575_EXT_RESET,
 * and the configuration is written back afterwards.  Returns the PCI
 * config-access error code on failure (fragment: tail lines missing).
 */
2260 static int reset_sar(struct atm_dev *dev)
2264	unsigned int pci[64];
2266	iadev = INPH_IA_DEV(dev);
     /* Save all 64 config dwords before the reset wipes them. */
2268	if ((error = pci_read_config_dword(iadev->pci,
2269				i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
     /* Writing 0 to the external reset register triggers the SAR reset. */
2271	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
     /* Restore the saved configuration space. */
2273	if ((error = pci_write_config_dword(iadev->pci,
2274					i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
/*
 * ia_init - one-time probe-side initialization of an adapter.
 * Reads PCI BAR0, sizes the board from the BAR length (1MB map => 4K VCs,
 * 256KB map => 1K VCs), ioremaps the registers/RAM window, fills in the
 * iadev register base pointers, reads the ESI, and resets the SAR.
 * Fragment: several declarations and error-return lines are missing.
 */
2281 static int __devinit ia_init(struct atm_dev *dev)
2284	unsigned long real_base;
2286	unsigned short command;
2289	/* The device has been identified and registered. Now we read
2290	   necessary configuration info like memory base address,
2291	   interrupt number etc */
2293	IF_INIT(printk(">ia_init\n");)
2294	dev->ci_range.vpi_bits = 0;
2295	dev->ci_range.vci_bits = NR_VCI_LD;
2297	iadev = INPH_IA_DEV(dev);
2298	real_base = pci_resource_start (iadev->pci, 0);
2299	iadev->irq = iadev->pci->irq;
2301	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2303		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
2307	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
2308			dev->number, iadev->pci->revision, real_base, iadev->irq);)
2310	/* find mapping size of board */
2312	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
     /* 1MB BAR => 4K-VC variant; 256KB BAR => 1K-VC variant. */
2314	if (iadev->pci_map_size == 0x100000){
2315	  iadev->num_vc = 4096;
2316	  dev->ci_range.vci_bits = NR_VCI_4K_LD;
2319	else if (iadev->pci_map_size == 0x40000) {
2320	  iadev->num_vc = 1024;
2324	   printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2327	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
2329	/* enable bus mastering */
2330	pci_set_master(iadev->pci);
2333	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
2337	/* mapping the physical address to a virtual address in address space */
2338	base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */
2342		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2346	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
2347			dev->number, iadev->pci->revision, base, iadev->irq);)
2349	/* filling the iphase dev structure */
2350	iadev->mem = iadev->pci_map_size /2;
2351	iadev->real_base = real_base;
2354	/* Bus Interface Control Registers */
2355	iadev->reg = base + REG_BASE;
2356	/* Segmentation Control Registers */
2357	iadev->seg_reg = base + SEG_BASE;
2358	/* Reassembly Control Registers */
2359	iadev->reass_reg = base + REASS_BASE;
2360	/* Front end/ DMA control registers */
2361	iadev->phy = base + PHY_BASE;
     /* NOTE(review): dma deliberately aliases phy (same PHY_BASE offset)
        in this fragment — confirm against the register map. */
2362	iadev->dma = base + PHY_BASE;
2363	/* RAM - Segmentation RAm and Reassembly RAM */
2364	iadev->ram = base + ACTUAL_RAM_BASE;
2365	iadev->seg_ram =  base + ACTUAL_SEG_RAM_BASE;
2366	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;
2368	/* lets print out the above */
2369	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
2370          iadev->reg,iadev->seg_reg,iadev->reass_reg,
2371          iadev->phy, iadev->ram, iadev->seg_ram,
2374	/* lets try reading the MAC address */
2375	error = get_esi(dev);
     /* On ESI read failure the mapping must be torn down again. */
2377	   iounmap(iadev->base);
2381	for (i=0; i < ESI_LEN; i++)
2382		printk("%s%02X",i ? "-" : "",dev->esi[i]);
2386	if (reset_sar(dev)) {
2387	   iounmap(iadev->base);
2388	   printk("IA: reset SAR fail, please try again\n");
/*
 * ia_update_stats - accumulate the hardware cell/packet counters into the
 * software totals in iadev.  Skipped entirely when no carrier is detected.
 * Each hardware counter is 16 bits wide; the cell counters are split into
 * low/high halves that are combined into a 32-bit increment.
 * NOTE(review): counters are presumably clear-on-read — confirm, since the
 * values are added (not assigned) on every timer tick.
 */
2394 static void ia_update_stats(IADEV *iadev) {
2395    if (!iadev->carrier_detect)
2397    iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2398    iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2399    iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2400    iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2401    iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2402    iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
/*
 * ia_led_timer - periodic (HZ/4) housekeeping timer shared by all boards.
 * Alternates the front-panel LED via CTRL_LED in the bus control register,
 * refreshes statistics on the "off" phase, and on the "on" phase polls the
 * transmit path and wakes any thread blocked in ia_close().
 * NOTE(review): ctrl_reg is declared static but is re-read per board each
 * pass, so the static storage only leaks the last value between ticks —
 * harmless here but worth confirming it was intentional.
 */
2406 static void ia_led_timer(unsigned long arg) {
2407 	unsigned long flags;
     /* One blink-phase flag per board slot (max 8 adapters). */
2408  	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2410        static u32 ctrl_reg;
2411        for (i = 0; i < iadev_count; i++) {
2413              ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2414              if (blinking[i] == 0) {
     /* LED off phase: clear the LED bit and refresh counters. */
2416                 ctrl_reg &= (~CTRL_LED);
2417                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2418                 ia_update_stats(ia_dev[i]);
     /* LED on phase: set the bit, poll tx, wake pending closers. */
2422                 ctrl_reg |= CTRL_LED;
2423                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2424                 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2425                 if (ia_dev[i]->close_pending)
2426                     wake_up(&ia_dev[i]->close_wait);
2427                 ia_tx_poll(ia_dev[i]);
2428                 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
     /* Re-arm for the next quarter-second tick. */
2432	mod_timer(&ia_timer, jiffies + HZ / 4);
/*
 * ia_phy_put - write one PHY register.  The value is only 8 bits wide but
 * the PHY window is accessed with full 32-bit MMIO writes (writel).
 */
2436 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2439	writel(value, INPH_IA_DEV(dev)->phy+addr);
/*
 * ia_phy_get - read one PHY register.  Mirrors ia_phy_put: a 32-bit MMIO
 * read whose result is truncated to the unsigned char return type.
 */
2442 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2444	return readl(INPH_IA_DEV(dev)->phy+addr);
/*
 * ia_free_tx - release all transmit-side resources for one adapter:
 * the descriptor table, the per-VC test tables, every per-descriptor
 * CPCS-trailer DMA mapping, the tx buffer array, and the DLE ring.
 * Same teardown sequence as the err_free_* path in tx_init.
 */
2447 static void ia_free_tx(IADEV *iadev)
2451	kfree(iadev->desc_tbl);
2452	for (i = 0; i < iadev->num_vc; i++)
2453		kfree(iadev->testTable[i]);
2454	kfree(iadev->testTable);
2455	for (i = 0; i < iadev->num_tx_desc; i++) {
2456		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
     /* Undo the streaming DMA mapping set up for each trailer. */
2458		pci_unmap_single(iadev->pci, desc->dma_addr,
2459			sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2462	kfree(iadev->tx_buf);
     /* Finally release the coherent DLE queue memory. */
2463      pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
/*
 * ia_free_rx - release receive-side resources: the open-VCC lookup array
 * and the coherent rx DLE queue memory.
 */
2467 static void ia_free_rx(IADEV *iadev)
2469	kfree(iadev->rx_open);
2470	pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
/*
 * ia_start - bring the adapter online after ia_init: claim the (shared)
 * IRQ, enable PCI memory decoding and bus mastering, program the bus
 * interface control register, run tx_init/rx_init, release the front-end
 * reset, then initialize whichever PHY variant the board carries
 * (25 Mbit MB25, DS3/E3 PM7345, or SUNI for OC3/UTP155) and start it.
 * Fragment: error paths and some declarations are missing from this view.
 */
2474 static int __devinit ia_start(struct atm_dev *dev)
2480	IF_EVENT(printk(">ia_start\n");)
2481	iadev = INPH_IA_DEV(dev);
2482	if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2483		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
2484		    dev->number, iadev->irq);
2488	/* @@@ should release IRQ on error */
2489	/* enabling memory + master */
2490	if ((error = pci_write_config_word(iadev->pci,
2492				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
2494		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
2495		    "master (0x%x)\n",dev->number, error);
2501	/* Maybe we should reset the front end, initialize Bus Interface Control
2502		Registers and see. */
2504	IF_INIT(printk("Bus ctrl reg: %08x\n",
2505                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2506	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
     /* Preserve only LED and front-end-reset bits, then OR in the
        interrupt-enable/configuration bits (lines elided in this view). */
2507	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
2515			| CTRL_DLETMASK		/* shud be removed l8r */
2522	writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2524	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2525                           readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
2526	   printk("Bus status reg after init: %08x\n",
2527                            readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
2530	error = tx_init(dev);
2533	error = rx_init(dev);
     /* Deassert front-end reset so the PHY can be configured. */
2537	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2538	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2539	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2540                      readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2541	phy = 0; /* resolve compiler complaint */
     /* PHY register 0 identifies the part: 0x30 => PM5346 S/UNI-155-Lite. */
2543	if ((phy=ia_phy_get(dev,0)) == 0x30)
2544		printk("IA: pm5346,rev.%d\n",phy&0x0f);
2546		printk("IA: utopia,rev.%0x\n",phy);)
     /* Dispatch PHY bring-up by board variant. */
2548	if (iadev->phy_type &  FE_25MBIT_PHY)
2549           ia_mb25_init(iadev);
2550	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2551           ia_suni_pm7345_init(iadev);
2553           error = suni_init(dev);
2556		if (dev->phy->start) {
2557			error = dev->phy->start(dev);
2561	/* Get iadev->carrier_detect status */
2562	IaFrontEndIntr(iadev);
     /* Error unwind: give the IRQ back (fragment: labels elided). */
2571	free_irq(iadev->irq, dev);
/*
 * ia_close - atm_dev close entry point for one VCC.
 * Tx side: mark the VCC not-ready, flush this VCC's packets out of the
 * shared tx backlog (re-queuing everyone else's), wait (bounded by a
 * pcr-derived timeout) for all in-flight descriptors to complete, and
 * return any reserved CBR/ABR bandwidth.  Rx side: neutralize the VCC's
 * reassembly/RX-VC/ABR table entries in adapter RAM and drain pending
 * DLEs.  Finally frees the ia_vcc and clears the ADDR flag.
 * Fragment: some declarations and closing braces are missing here.
 */
2576 static void ia_close(struct atm_vcc *vcc)
2581	struct ia_vcc *ia_vcc;
2582	struct sk_buff *skb = NULL;
2583	struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2584	unsigned long closetime, flags;
2586	iadev = INPH_IA_DEV(vcc->dev);
2587	ia_vcc = INPH_IA_VCC(vcc);
2588	if (!ia_vcc) return;
2590	IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
2591                                              ia_vcc->vc_desc_cnt,vcc->vci);)
2592	clear_bit(ATM_VF_READY,&vcc->flags);
2593	skb_queue_head_init (&tmp_tx_backlog);
2594	skb_queue_head_init (&tmp_vcc_backlog);
2595	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2596	   iadev->close_pending++;
     /* Brief uninterruptible nap to let in-flight tx settle before
        sweeping the backlog. */
2597	   prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2598	   schedule_timeout(50);
2599	   finish_wait(&iadev->timeout_wait, &wait);
2600	   spin_lock_irqsave(&iadev->tx_lock, flags);
     /* Pull every skb off the shared backlog: free ours, park others. */
2601	   while((skb = skb_dequeue(&iadev->tx_backlog))) {
2602              if (ATM_SKB(skb)->vcc == vcc){
2603                 if (vcc->pop) vcc->pop(vcc, skb);
2604                 else dev_kfree_skb_any(skb);
2607                 skb_queue_tail(&tmp_tx_backlog, skb);
     /* Restore the unrelated skbs to the device backlog. */
2609           while((skb = skb_dequeue(&tmp_tx_backlog)))
2610                 skb_queue_tail(&iadev->tx_backlog, skb);
2611	   IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
     /* Timeout scales inversely with the VC's cell rate. */
2612           closetime = 300000 / ia_vcc->pcr;
2615           spin_unlock_irqrestore(&iadev->tx_lock, flags);
     /* Woken by ia_led_timer / tx completion when all descs drain. */
2616           wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2617           spin_lock_irqsave(&iadev->tx_lock, flags);
2618           iadev->close_pending--;
     /* Reset the rate-test bookkeeping for this VCI back to UBR. */
2619           iadev->testTable[vcc->vci]->lastTime = 0;
2620           iadev->testTable[vcc->vci]->fract = 0;
2621           iadev->testTable[vcc->vci]->vc_status = VC_UBR;
2622           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2623              if (vcc->qos.txtp.min_pcr > 0)
2624                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2626           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2627              ia_vcc = INPH_IA_VCC(vcc);
     /* Return the CBR slots this VC held in the schedule table. */
2628              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2629              ia_cbrVc_close (vcc);
2631           spin_unlock_irqrestore(&iadev->tx_lock, flags);
2634	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2635	   // reset reass table
2636	   vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2637	   vc_table += vcc->vci;
2638	   *vc_table = NO_AAL5_PKT;
     /* Park the RX VC table entry on an invalid mapping. */
2640	   vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2641	   vc_table += vcc->vci;
2642	   *vc_table = (vcc->vci << 6) | 15;
2643	   if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2644	      struct abr_vc_table __iomem *abr_vc_table =
2645                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2646	      abr_vc_table +=  vcc->vci;
     /* Restore default ABR rate-decrease/increase factors. */
2647	      abr_vc_table->rdf = 0x0003;
2648	      abr_vc_table->air = 0x5eb1;
2650	   // Drain the packets
2651	   rx_dle_intr(vcc->dev);
2652	   iadev->rx_open[vcc->vci] = NULL;
2654	kfree(INPH_IA_VCC(vcc));
2656	vcc->dev_data = NULL;
2657	clear_bit(ATM_VF_ADDR,&vcc->flags);
/*
 * ia_open - atm_dev open entry point.  Validates the request (AAL5 only),
 * allocates the per-VCC ia_vcc, opens the rx then tx halves, and on first
 * open of any VCC arms the global LED/statistics timer.
 */
2661 static int ia_open(struct atm_vcc *vcc)
2664	struct ia_vcc *ia_vcc;
2666	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2668		IF_EVENT(printk("ia: not partially allocated resources\n");)
2669		vcc->dev_data = NULL;
2671	iadev = INPH_IA_DEV(vcc->dev);
     /* NOTE(review): vci is compared against ATM_VPI_UNSPEC and vpi
        against ATM_VCI_UNSPEC — constants look swapped; harmless only
        if both expand to the same value (-1).  Confirm against atm.h. */
2672	if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2674		IF_EVENT(printk("iphase open: unspec part\n");)
2675		set_bit(ATM_VF_ADDR,&vcc->flags);
     /* Only AAL5 is supported by this driver. */
2677	if (vcc->qos.aal != ATM_AAL5)
2679	IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2680                                 vcc->dev->number, vcc->vpi, vcc->vci);)
2682	/* Device dependent initialization */
2683	ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2684	if (!ia_vcc) return -ENOMEM;
2685	vcc->dev_data = ia_vcc;
2687	if ((error = open_rx(vcc)))
2689		IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2694	if ((error = open_tx(vcc)))
2696		IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2701	set_bit(ATM_VF_READY,&vcc->flags);
     /* First successful open arms the global housekeeping timer. */
2705        static u8 first = 1;
2707           ia_timer.expires = jiffies + 3*HZ;
2708           add_timer(&ia_timer);
2713	IF_EVENT(printk("ia open returning\n");)
/*
 * ia_change_qos - QoS-change entry point.  Only logs the call in this
 * fragment; the return is outside the visible lines.
 */
2717 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
2719	IF_EVENT(printk(">ia_change_qos\n");)
/*
 * ia_ioctl - driver-private ioctl dispatcher (IA_CMD), with passthrough
 * of any other command to the PHY's ioctl handler.  The IA_CMD sub-
 * commands dump driver state, segmentation/reassembly registers, SONET
 * statistics, recycle rx buffers, poke the front end, and set the debug
 * mask.  All privileged sub-commands require CAP_NET_ADMIN.
 * Fix: lines 2774/2775 had "&regs_local" mangled to "(R)s_local" by an
 * HTML-entity decoding of "&reg" — restored the address-of expressions.
 */
2723 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2729	IF_EVENT(printk(">ia_ioctl\n");)
     /* Non-IA commands are forwarded to the PHY layer if it has ioctl. */
2730	if (cmd != IA_CMD) {
2731		if (!dev->phy->ioctl) return -EINVAL;
2732		return dev->phy->ioctl(dev,cmd,arg);
2734	if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
     /* ia_cmds.status selects the board index to operate on. */
2735	board = ia_cmds.status;
2736	if ((board < 0) || (board > iadev_count))
2738	iadev = ia_dev[board];
2739	switch (ia_cmds.cmd) {
2742		switch (ia_cmds.sub_cmd) {
2744			if (!capable(CAP_NET_ADMIN)) return -EPERM;
2745			if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2749		case MEMDUMP_SEGREG:
2750			if (!capable(CAP_NET_ADMIN)) return -EPERM;
2751			tmps = (u16 __user *)ia_cmds.buf;
     /* Dump the low halves of the first 0x80 bytes of seg registers. */
2752			for(i=0; i<0x80; i+=2, tmps++)
2753				if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2757		case MEMDUMP_REASSREG:
2758			if (!capable(CAP_NET_ADMIN)) return -EPERM;
2759			tmps = (u16 __user *)ia_cmds.buf;
2760			for(i=0; i<0x80; i+=2, tmps++)
2761				if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2767			ia_regs_t       *regs_local;
2771			if (!capable(CAP_NET_ADMIN)) return -EPERM;
     /* Kernel-side staging buffer for the register snapshot. */
2772			regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2773			if (!regs_local) return -ENOMEM;
2774			ffL = &regs_local->ffredn;
2775			rfL = &regs_local->rfredn;
2776		        /* Copy real rfred registers into the local copy */
2777			for (i=0; i<(sizeof (rfredn_t))/4; i++)
2778				((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2779			/* Copy real ffred registers into the local copy */
2780			for (i=0; i<(sizeof (ffredn_t))/4; i++)
2781				((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2783			if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2788			printk("Board %d registers dumped\n", board);
2794			if (!capable(CAP_NET_ADMIN)) return -EPERM;
2802			printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2803			printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2808			struct k_sonet_stats *stats;
2809			stats = &PRIV(_ia_dev[board])->sonet_stats;
2810			printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2811			printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2812			printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2813			printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2814			printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2815			printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2816			printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2817			printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2818			printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2823			if (!capable(CAP_NET_ADMIN)) return -EPERM;
     /* Recycle every rx descriptor back to the free queue and unmask
        the free-queue-empty / exception interrupts. */
2824			for (i = 1; i <= iadev->num_rx_desc; i++)
2825				free_desc(_ia_dev[board], i);
2826			writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2827						iadev->reass_reg+REASS_MASK_REG);
2834			if (!capable(CAP_NET_ADMIN)) return -EPERM;
2835	                IaFrontEndIntr(iadev);
2838			if (!capable(CAP_NET_ADMIN)) return -EPERM;
2841			IADebugFlag = ia_cmds.maddr;
2842			printk("New debug option loaded\n");
/*
 * ia_getsockopt - socket-option getter entry point; only logs the call
 * in this fragment (return statement outside the visible lines).
 */
2858 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
2859	void __user *optval, int optlen)
2861	IF_EVENT(printk(">ia_getsockopt\n");)
/*
 * ia_setsockopt - socket-option setter entry point; only logs the call
 * in this fragment (return statement outside the visible lines).
 */
2865 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
2866	void __user *optval, unsigned int optlen)
2868	IF_EVENT(printk(">ia_setsockopt\n");)
/*
 * ia_pkt_tx - hand one skb to the hardware transmit path.
 * Validates the skb (open VC, fits the tx buffer, 4-byte aligned data),
 * claims a free descriptor from the TCQ, records the desc<->skb mapping,
 * queues the descriptor number in the packet-ready queue, fills in the
 * CPCS trailer and buffer descriptor, then builds two DLEs (payload +
 * trailer) and kicks the tx DMA counter.  Every ~20 packets it adjusts
 * the VCC's tx_quota as crude flow control.  Returns 0 on success/drop.
 * Fragment: several declarations and error branches are missing.
 */
2872 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2875        struct tx_buf_desc __iomem *buf_desc_ptr;
2879        struct cpcs_trailer *trailer;
2880        struct ia_vcc *iavcc;
2882        iadev = INPH_IA_DEV(vcc->dev);
2883        iavcc = INPH_IA_VCC(vcc);
     /* Drop silently if the VC is no longer transmitting. */
2884        if (!iavcc->txing) {
2885           printk("discard packet on closed VC\n");
2889           dev_kfree_skb_any(skb);
     /* Payload + 8-byte trailer must fit in one tx buffer. */
2893        if (skb->len > iadev->tx_buf_sz - 8) {
2894           printk("Transmit size over tx buffer size\n");
2898           dev_kfree_skb_any(skb);
     /* Hardware DMA requires 4-byte-aligned payload. */
2901        if ((unsigned long)skb->data & 3) {
2902           printk("Misaligned SKB\n");
2906           dev_kfree_skb_any(skb);
2909	/* Get a descriptor number from our free descriptor queue
2910	   We get the descr number from the TCQ now, since I am using
2911	   the TCQ as a free buffer queue. Initially TCQ will be
2912	   initialized with all the descriptors and is hence, full.
2914	desc = get_desc (iadev, iavcc);
     /* Top 3 bits of the TCQ entry carry a completion code. */
2917	comp_code = desc >> 13;
2920	if ((desc == 0) || (desc > iadev->num_tx_desc))
2922	   IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
     /* NOTE(review): the tx counter is bumped even on this invalid-desc
        drop path and 0 (success) is returned — confirm intent. */
2923           atomic_inc(&vcc->stats->tx);
2927           dev_kfree_skb_any(skb);
2928           return 0;   /* return SUCCESS */
2933	      IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2937	/* remember the desc and vcc mapping */
2938	iavcc->vc_desc_cnt++;
2939	iadev->desc_tbl[desc-1].iavcc = iavcc;
2940	iadev->desc_tbl[desc-1].txskb = skb;
2941	IA_SKB_STATE(skb) = 0;
     /* Advance the TCQ read pointer (entries are 2 bytes), wrapping. */
2943	iadev->ffL.tcq_rd += 2;
2944        if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2945	   iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2946	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2948	/* Put the descriptor number in the packet ready queue
2949		and put the updated write pointer in the DLE field
2951	*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
     /* Advance the PRQ write pointer, wrapping at the end. */
2953 	iadev->ffL.prq_wr += 2;
2954        if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2955      		iadev->ffL.prq_wr = iadev->ffL.prq_st;
2957	/* Figure out the exact length of the packet and padding required to
2958           make it  aligned on a 48 byte boundary.  */
2959	total_len = skb->len + sizeof(struct cpcs_trailer);
2960	total_len = ((total_len + 47) / 48) * 48;
2961	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2963	/* Put the packet in a tx buffer */
2964	trailer = iadev->tx_buf[desc-1].cpcs;
2965        IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2966                  skb, skb->data, skb->len, desc);)
2967	trailer->control = 0;
     /* CPCS length field is stored byte-swapped (big-endian on wire). */
2969        trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2970	trailer->crc32 = 0;	/* not needed - dummy bytes */
2972	/* Display the packet */
2973	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2974                                                        skb->len, tcnter++);
2975        xdump(skb->data, skb->len, "TX: ");
2978	/* Build the buffer descriptor */
2979	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2980	buf_desc_ptr += desc;	/* points to the corresponding entry */
2981	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
2982	/* Huh ? p.115 of users guide describes this as a read-only register */
2983	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2984	buf_desc_ptr->vc_index = vcc->vci;
2985	buf_desc_ptr->bytes = total_len;
2987        if (vcc->qos.txtp.traffic_class == ATM_ABR)
2988	   clear_lockup (vcc, iadev);
2990	/* Build the DLE structure */
2991	wr_ptr = iadev->tx_dle_q.write;
2992	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
     /* Map the payload for device reads and record both bus and
        on-card addresses in the DLE. */
2993	wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
2994		skb->len, PCI_DMA_TODEVICE);
2995	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
2996                                  buf_desc_ptr->buf_start_lo;
2997	/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect ?? */
2998	wr_ptr->bytes = skb->len;
3000        /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3001        if ((wr_ptr->bytes >> 2) == 0xb)
3002           wr_ptr->bytes = 0x30;
3004	wr_ptr->mode = TX_DLE_PSI;
3005	wr_ptr->prq_wr_ptr_data = 0;
3007	/* end is not to be used for the DLE q */
3008	if (++wr_ptr == iadev->tx_dle_q.end)
3009		wr_ptr = iadev->tx_dle_q.start;
3011        /* Build trailer dle */
3012        wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
     /* Trailer lands at the end of the 48-byte-padded buffer. */
3013        wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3014          buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3016        wr_ptr->bytes = sizeof(struct cpcs_trailer);
     /* The trailer DLE carries the interrupt enable and the PRQ pointer. */
3017        wr_ptr->mode = DMA_INT_ENABLE;
3018        wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3020        /* end is not to be used for the DLE q */
3021        if (++wr_ptr == iadev->tx_dle_q.end)
3022           wr_ptr = iadev->tx_dle_q.start;
3024	iadev->tx_dle_q.write = wr_ptr;
3025        ATM_DESC(skb) = vcc->vci;
3026        skb_queue_tail(&iadev->tx_dma_q, skb);
3028        atomic_inc(&vcc->stats->tx);
3029        iadev->tx_pkt_cnt++;
3030	/* Increment transaction counter */
     /* Two DLEs were queued (payload + trailer), hence the count of 2. */
3031	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3034        /* add flow control logic */
3035        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
     /* Many outstanding descriptors: shrink the quota by 25%. */
3036          if (iavcc->vc_desc_cnt > 10) {
3037             vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3038            printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3039              iavcc->flow_inc = -1;
3040              iavcc->saved_tx_quota = vcc->tx_quota;
3041           } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3042             // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3043             printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3044              iavcc->flow_inc = 0;
3048	IF_TX(printk("ia send done\n");)
/*
 * ia_send - atm_dev send entry point.  Validates the skb, then under
 * tx_lock either queues it behind an existing backlog (to preserve
 * ordering) or attempts immediate transmission via ia_pkt_tx, falling
 * back to the backlog if that fails.
 */
3052 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3055        struct ia_vcc *iavcc;
3056        unsigned long flags;
3058        iadev = INPH_IA_DEV(vcc->dev);
3059        iavcc = INPH_IA_VCC(vcc);
     /* Reject NULL skbs and packets too large for a tx buffer. */
3060        if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3063              printk(KERN_CRIT "null skb in ia_send\n");
3064           else dev_kfree_skb_any(skb);
3067        spin_lock_irqsave(&iadev->tx_lock, flags);
     /* VC must be fully open; otherwise drop under the lock. */
3068        if (!test_bit(ATM_VF_READY,&vcc->flags)){
3069           dev_kfree_skb_any(skb);
3070           spin_unlock_irqrestore(&iadev->tx_lock, flags);
3073        ATM_SKB(skb)->vcc = vcc;
     /* Keep FIFO order: if a backlog exists, append rather than send. */
3075        if (skb_peek(&iadev->tx_backlog)) {
3076           skb_queue_tail(&iadev->tx_backlog, skb);
3079           if (ia_pkt_tx (vcc, skb)) {
3080              skb_queue_tail(&iadev->tx_backlog, skb);
3083        spin_unlock_irqrestore(&iadev->tx_lock, flags);
/*
 * ia_proc_read - /proc read callback.  Emits, per *pos position, the
 * board-type description (PHY variant + VC count + memory size) and a
 * statistics summary built from the software counters.
 * NOTE(review): tmpPtr is written through without a visible assignment
 * in this fragment — the lines that advance it into `page` are missing
 * from this extract; verify in the full source.
 */
3088 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3092   IADEV *iadev = INPH_IA_DEV(dev);
3094     if (iadev->phy_type == FE_25MBIT_PHY) {
3095       n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n" );
3098       if (iadev->phy_type == FE_DS3_PHY)
3099          n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3100       else if (iadev->phy_type == FE_E3_PHY)
3101          n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3102       else if (iadev->phy_type == FE_UTP_OPTION)
3103         n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155");
3105        n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
     /* Append the VC-count suffix derived from the BAR size. */
3107       if (iadev->pci_map_size == 0x40000)
3108         n += sprintf(tmpPtr, "-1KVC-");
3110         n += sprintf(tmpPtr, "-4KVC-");
     /* Append the packet-memory size suffix. */
3112       if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3113         n += sprintf(tmpPtr, "1M  \n");
3114       else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3115         n += sprintf(tmpPtr, "512K\n");
3117         n += sprintf(tmpPtr, "128K\n");
     /* Statistics page: configured buffer counts/sizes plus the
        accumulated software counters from ia_update_stats. */
3121     return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3122                           "  Size of Tx Buffer  :  %u\n"
3123                           "  Number of Rx Buffer:  %u\n"
3124                           "  Size of Rx Buffer  :  %u\n"
3125                           "  Packets Receiverd  :  %u\n"
3126                           "  Packets Transmitted:  %u\n"
3127                           "  Cells Received     :  %u\n"
3128                           "  Cells Transmitted  :  %u\n"
3129                           "  Board Dropped Cells:  %u\n"
3130                           "  Board Dropped Pkts :  %u\n",
3131                           iadev->num_tx_desc,  iadev->tx_buf_sz,
3132                           iadev->num_rx_desc,  iadev->rx_buf_sz,
3133                           iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3134                           iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3135                           iadev->drop_rxcell, iadev->drop_rxpkt);
/*
 * ops - atmdev_ops vtable registered with the ATM core; wires the ia_*
 * entry points above into the generic ATM device layer.  (open/close/
 * send/ioctl initializers fall in lines elided from this extract.)
 */
3140 static const struct atmdev_ops ops = {
3144	.getsockopt	= ia_getsockopt,
3145	.setsockopt	= ia_setsockopt,
3147	.phy_put	= ia_phy_put,
3148	.phy_get	= ia_phy_get,
3149	.change_qos	= ia_change_qos,
3150	.proc_read	= ia_proc_read,
3151	.owner		= THIS_MODULE,
/*
 * ia_init_one - PCI probe callback.  Allocates the IADEV, enables the
 * PCI device, registers the atm_dev, records the board in the global
 * ia_dev/_ia_dev arrays, then runs ia_init + ia_start under misc_lock.
 * Unwinds registration/enable on failure via the err_out_* labels.
 * Fragment: some intermediate lines (count increment, return) missing.
 */
3154 static int __devinit ia_init_one(struct pci_dev *pdev,
3155					 const struct pci_device_id *ent)
3157	struct atm_dev *dev;
3159	unsigned long flags;
3162	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3170	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3171		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3172	if (pci_enable_device(pdev)) {
3174		goto err_out_free_iadev;
3176	dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3179		goto err_out_disable_dev;
3181	dev->dev_data = iadev;
3182	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3183	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3186	pci_set_drvdata(pdev, dev);
     /* Publish the board in the global per-board lookup arrays. */
3188	ia_dev[iadev_count] = iadev;
3189	_ia_dev[iadev_count] = dev;
3191	spin_lock_init(&iadev->misc_lock);
3192	/* First fixes first. I don't want to think about this now. */
3193	spin_lock_irqsave(&iadev->misc_lock, flags);
3194	if (ia_init(dev) || ia_start(dev)) {
3195		IF_INIT(printk("IA register failed!\n");)
     /* Roll back the global array entries before deregistering. */
3197		ia_dev[iadev_count] = NULL;
3198		_ia_dev[iadev_count] = NULL;
3199		spin_unlock_irqrestore(&iadev->misc_lock, flags);
3201		goto err_out_deregister_dev;
3203	spin_unlock_irqrestore(&iadev->misc_lock, flags);
3204	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
     /* Link into the global board list. */
3206	iadev->next_board = ia_boards;
3211 err_out_deregister_dev:
3212	atm_dev_deregister(dev);
3213 err_out_disable_dev:
3214	pci_disable_device(pdev);
/*
 * ia_remove_one - PCI remove callback: mask PHY loss-of-signal
 * interrupts, stop the PHY, free the IRQ, clear the global board slots,
 * deregister the atm_dev, unmap BAR0, and disable the PCI device.
 */
3221 static void __devexit ia_remove_one(struct pci_dev *pdev)
3223	struct atm_dev *dev = pci_get_drvdata(pdev);
3224	IADEV *iadev = INPH_IA_DEV(dev);
3226	/* Disable phy interrupts */
3227	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3231	if (dev->phy && dev->phy->stop)
3232		dev->phy->stop(dev);
3234	/* De-register device */
3235      	free_irq(iadev->irq, dev);
3237	ia_dev[iadev_count] = NULL;
3238	_ia_dev[iadev_count] = NULL;
3239	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3240	atm_dev_deregister(dev);
3242      	iounmap(iadev->base);
3243	pci_disable_device(pdev);
/*
 * ia_pci_tbl - PCI IDs this driver binds to: Interphase device IDs
 * 0x0008 and 0x0009, any subsystem.  Exported for module autoloading.
 */
3251 static struct pci_device_id ia_pci_tbl[] = {
3252	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3253	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3256 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
/* ia_driver - PCI driver glue: probe/remove plus the ID table above. */
3258 static struct pci_driver ia_driver = {
3260	.id_table =     ia_pci_tbl,
3261	.probe =        ia_init_one,
3262	.remove =       __devexit_p(ia_remove_one),
/*
 * ia_module_init - module entry: register the PCI driver and, on
 * success, arm the global LED/statistics timer; otherwise log that no
 * adapter was found.
 */
3265 static int __init ia_module_init(void)
3269	ret = pci_register_driver(&ia_driver);
3271		ia_timer.expires = jiffies + 3*HZ;
3272		add_timer(&ia_timer);
3274		printk(KERN_ERR DEV_LABEL ": no adapter found\n");
/*
 * ia_module_exit - module teardown: unregister the PCI driver (which
 * runs ia_remove_one per board) and cancel the global timer.
 */
3278 static void __exit ia_module_exit(void)
3280	pci_unregister_driver(&ia_driver);
3282        del_timer(&ia_timer);
3285 module_init(ia_module_init);
3286 module_exit(ia_module_exit);