Merge tag 'stable/for-linus-3.4-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 7 Apr 2012 00:54:53 +0000 (17:54 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 7 Apr 2012 00:54:53 +0000 (17:54 -0700)
Pull xen fixes from Konrad Rzeszutek Wilk:
 "Two fixes for regressions:
   * one is a workaround that will be removed in v3.5 with proper fix in
     the tip/x86 tree,
   * the other is to fix drivers to load on PV (a previous patch made
     them only load in PVonHVM mode).

  The rest are just minor fixes in the various drivers and some cleanup
  in the core code."

* tag 'stable/for-linus-3.4-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen/pcifront: avoid pci_frontend_enable_msix() falsely returning success
  xen/pciback: fix XEN_PCI_OP_enable_msix result
  xen/smp: Remove unnecessary call to smp_processor_id()
  xen/x86: Workaround 'x86/ioapic: Add register level checks to detect bogus io-apic entries'
  xen: only check xen_platform_pci_unplug if hvm
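
For the PV regression above, the frontend half of the fix shows up in the
xen-blkfront and xen-netfront hunks below: xen_platform_pci_unplug is only
meaningful on a PVonHVM guest, so the check is now gated on xen_hvm_domain().
A minimal sketch of the resulting init-time guard (the function name is
illustrative, not from this tree):

	static int __init frontend_init(void)	/* cf. xlblk_init / netif_init */
	{
		if (!xen_domain())
			return -ENODEV;	/* not running under Xen at all */

		/*
		 * On PVonHVM the emulated platform device must be unplugged
		 * before the PV frontends may bind; a pure PV guest has
		 * nothing to unplug, so skip the check there.
		 */
		if (xen_hvm_domain() && !xen_platform_pci_unplug)
			return -ENODEV;

		/* ... register the frontend as usual ... */
		return 0;
	}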

arch/x86/xen/mmu.c
arch/x86/xen/smp.c
drivers/block/xen-blkfront.c
drivers/net/xen-netfront.c
drivers/pci/xen-pcifront.c

diff --combined arch/x86/xen/mmu.c
index 988828b479ed29660363f87adda4c6c62707441b,91dc2871e3367ffb0b46a55281f9c68853aa99a5..b8e279479a6b31984a7e715c52eec4283e38e007
@@@ -415,13 -415,13 +415,13 @@@ static pteval_t iomap_pte(pteval_t val
  static pteval_t xen_pte_val(pte_t pte)
  {
        pteval_t pteval = pte.pte;
 -
 +#if 0
        /* If this is a WC pte, convert back from Xen WC to Linux WC */
        if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
                WARN_ON(!pat_enabled);
                pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
        }
 -
 +#endif
        if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
                return pteval;
  
@@@ -463,7 -463,7 +463,7 @@@ void xen_set_pat(u64 pat
  static pte_t xen_make_pte(pteval_t pte)
  {
        phys_addr_t addr = (pte & PTE_PFN_MASK);
 -
 +#if 0
        /* If Linux is trying to set a WC pte, then map to the Xen WC.
         * If _PAGE_PAT is set, then it probably means it is really
         * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
                if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
                        pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
        }
 -
 +#endif
        /*
         * Unprivileged domains are allowed to do IOMAPpings for
         * PCI passthrough, but not map ISA space.  The ISA
@@@ -1859,6 -1859,7 +1859,7 @@@ pgd_t * __init xen_setup_kernel_pagetab
  #endif        /* CONFIG_X86_64 */
  
  static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
+ static unsigned char fake_ioapic_mapping[PAGE_SIZE] __page_aligned_bss;
  
  static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
  {
                 * We just don't map the IO APIC - all access is via
                 * hypercalls.  Keep the address in the pte for reference.
                 */
-               pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+               pte = pfn_pte(PFN_DOWN(__pa(fake_ioapic_mapping)), PAGE_KERNEL);
                break;
  #endif
  
@@@ -2064,6 -2065,7 +2065,7 @@@ void __init xen_init_mmu_ops(void
        pv_mmu_ops = xen_mmu_ops;
  
        memset(dummy_mapping, 0xff, PAGE_SIZE);
+       memset(fake_ioapic_mapping, 0xfd, PAGE_SIZE);
  }
  
  /* Protected by xen_reservation_lock. */
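
The fake_ioapic_mapping page added above is the workaround named in the
shortlog: dummy_mapping is memset to 0xff, so reads through the unmapped
IO APIC fixmap returned 0xffffffff, which the new register-level checks in
tip/x86 interpret as a bogus io-apic entry. Backing the IO APIC fixmap with
a page of 0xfd makes such reads return a value the check tolerates. A sketch
of the kind of check being dodged (an assumed form, not the tip/x86 code):

	/* An IO-APIC register read of all ones looks like absent hardware;
	 * 0xfdfdfdfd read back from the fake page passes this test. */
	static bool ioapic_read_is_bogus(u32 val)
	{
		return val == 0xffffffff;
	}
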
diff --combined arch/x86/xen/smp.c
index 02900e8ce26cecba1e02bab8d4f2fdb48888d823,e845555ff486445cce568e366aa09266ca90dc3e..5fac6919b957fa88a9b9cc130007671ffbc00d13
@@@ -59,7 -59,7 +59,7 @@@ static irqreturn_t xen_reschedule_inter
  
  static void __cpuinit cpu_bringup(void)
  {
-       int cpu = smp_processor_id();
+       int cpu;
  
        cpu_init();
        touch_softlockup_watchdog();
@@@ -415,13 -415,6 +415,13 @@@ static void __cpuinit xen_play_dead(voi
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
 +      /*
 +       * Balance out the preempt calls - as we are running in the cpu_idle
 +       * loop which has been called at bootup from cpu_bringup_and_idle.
 +       * cpu_bringup_and_idle called cpu_bringup(), which did a
 +       * preempt_disable(), so this preempt_enable() balances it out.
 +       */
 +      preempt_enable();
  }
  
  #else /* !CONFIG_HOTPLUG_CPU */
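
The preempt_enable() added to xen_play_dead() balances the flow sketched
below (a rough call graph inferred from the comment above, not literal
kernel code):

	/*
	 * boot:    cpu_bringup_and_idle() -> cpu_bringup()  [preempt_disable()]
	 *                                 -> cpu_idle()     [never returns]
	 * offline: xen_play_dead() -> HYPERVISOR_vcpu_op(VCPUOP_down, ...)
	 * online:  the vcpu resumes in xen_play_dead() -> cpu_bringup()
	 *          -> preempt_enable()  balances the second preempt_disable(),
	 *             as we fall back into the already-running cpu_idle loop.
	 */
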
diff --combined drivers/block/xen-blkfront.c
index d5e1ab95674044361920140550b0c6b34f9be9b3,19b6005a323edb87eef360d367e3c8e73ec86811..98cbeba8cd5358ca026b2e4b692a4aa55704c739
@@@ -98,8 -98,7 +98,8 @@@ struct blkfront_inf
        unsigned long shadow_free;
        unsigned int feature_flush;
        unsigned int flush_op;
 -      unsigned int feature_discard;
 +      unsigned int feature_discard:1;
 +      unsigned int feature_secdiscard:1;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
        int is_ready;
@@@ -136,15 -135,15 +136,15 @@@ static int get_id_from_freelist(struct 
  {
        unsigned long free = info->shadow_free;
        BUG_ON(free >= BLK_RING_SIZE);
 -      info->shadow_free = info->shadow[free].req.id;
 -      info->shadow[free].req.id = 0x0fffffee; /* debug */
 +      info->shadow_free = info->shadow[free].req.u.rw.id;
 +      info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
        return free;
  }
  
  static void add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
  {
 -      info->shadow[id].req.id  = info->shadow_free;
 +      info->shadow[id].req.u.rw.id  = info->shadow_free;
        info->shadow[id].request = NULL;
        info->shadow_free = id;
  }
@@@ -157,7 -156,7 +157,7 @@@ static int xlbd_reserve_minors(unsigne
        if (end > nr_minors) {
                unsigned long *bitmap, *old;
  
 -              bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
 +              bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
                                 GFP_KERNEL);
                if (bitmap == NULL)
                        return -ENOMEM;
@@@ -288,9 -287,9 +288,9 @@@ static int blkif_queue_request(struct r
        id = get_id_from_freelist(info);
        info->shadow[id].request = req;
  
 -      ring_req->id = id;
 +      ring_req->u.rw.id = id;
        ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
 -      ring_req->handle = info->handle;
 +      ring_req->u.rw.handle = info->handle;
  
        ring_req->operation = rq_data_dir(req) ?
                BLKIF_OP_WRITE : BLKIF_OP_READ;
                ring_req->operation = info->flush_op;
        }
  
 -      if (unlikely(req->cmd_flags & REQ_DISCARD)) {
 +      if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
                /* id, sector_number and handle are set above. */
                ring_req->operation = BLKIF_OP_DISCARD;
 -              ring_req->nr_segments = 0;
                ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
 +              if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
 +                      ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
 +              else
 +                      ring_req->u.discard.flag = 0;
        } else {
 -              ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 -              BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 +              ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
 +                                                         info->sg);
 +              BUG_ON(ring_req->u.rw.nr_segments >
 +                     BLKIF_MAX_SEGMENTS_PER_REQUEST);
  
 -              for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
 +              for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
                        buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
                        fsect = sg->offset >> 9;
                        lsect = fsect + (sg->length >> 9) - 1;
@@@ -430,8 -424,6 +430,8 @@@ static int xlvbd_init_blk_queue(struct 
                blk_queue_max_discard_sectors(rq, get_capacity(gd));
                rq->limits.discard_granularity = info->discard_granularity;
                rq->limits.discard_alignment = info->discard_alignment;
 +              if (info->feature_secdiscard)
 +                      queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
        }
  
        /* Hard sector size and max sectors impersonate the equiv. hardware. */
@@@ -713,9 -705,7 +713,9 @@@ static void blkif_free(struct blkfront_
  static void blkif_completion(struct blk_shadow *s)
  {
        int i;
 -      for (i = 0; i < s->req.nr_segments; i++)
 +      /* Do not call this for BLKIF_OP_DISCARD requests - nr_segments
 +       * occupies the same location as the discard flag. */
 +      for (i = 0; i < s->req.u.rw.nr_segments; i++)
                gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
  }
  
@@@ -746,8 -736,7 +746,8 @@@ static irqreturn_t blkif_interrupt(int 
                id   = bret->id;
                req  = info->shadow[id].request;
  
 -              blkif_completion(&info->shadow[id]);
 +              if (bret->operation != BLKIF_OP_DISCARD)
 +                      blkif_completion(&info->shadow[id]);
  
                add_id_to_freelist(info, id);
  
                                           info->gd->disk_name);
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
 +                              info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
 +                              queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
                        }
                        __blk_end_request_all(req, error);
                        break;
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
 -                                   info->shadow[id].req.nr_segments == 0)) {
 +                                   info->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
                                       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
                                       "barrier" :  "flush disk cache",
@@@ -997,8 -984,8 +997,8 @@@ static int blkfront_probe(struct xenbus
        INIT_WORK(&info->work, blkif_restart_queue);
  
        for (i = 0; i < BLK_RING_SIZE; i++)
 -              info->shadow[i].req.id = i+1;
 -      info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
 +              info->shadow[i].req.u.rw.id = i+1;
 +      info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
  
        /* Front end dir is a number, which is used as the id. */
        info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
@@@ -1032,9 -1019,9 +1032,9 @@@ static int blkif_recover(struct blkfron
        /* Stage 2: Set up free list. */
        memset(&info->shadow, 0, sizeof(info->shadow));
        for (i = 0; i < BLK_RING_SIZE; i++)
 -              info->shadow[i].req.id = i+1;
 +              info->shadow[i].req.u.rw.id = i+1;
        info->shadow_free = info->ring.req_prod_pvt;
 -      info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
 +      info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
  
        /* Stage 3: Find pending requests and requeue them. */
        for (i = 0; i < BLK_RING_SIZE; i++) {
                *req = copy[i].req;
  
                /* We get a new request id, and must reset the shadow state. */
 -              req->id = get_id_from_freelist(info);
 -              memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
 +              req->u.rw.id = get_id_from_freelist(info);
 +              memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));
  
 +              if (req->operation != BLKIF_OP_DISCARD) {
                /* Rewrite any grant references invalidated by susp/resume. */
 -              for (j = 0; j < req->nr_segments; j++)
 -                      gnttab_grant_foreign_access_ref(
 -                              req->u.rw.seg[j].gref,
 -                              info->xbdev->otherend_id,
 -                              pfn_to_mfn(info->shadow[req->id].frame[j]),
 -                              rq_data_dir(info->shadow[req->id].request));
 -              info->shadow[req->id].req = *req;
 +                      for (j = 0; j < req->u.rw.nr_segments; j++)
 +                              gnttab_grant_foreign_access_ref(
 +                                      req->u.rw.seg[j].gref,
 +                                      info->xbdev->otherend_id,
 +                                      pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
 +                                      rq_data_dir(info->shadow[req->u.rw.id].request));
 +              }
 +              info->shadow[req->u.rw.id].req = *req;
  
                info->ring.req_prod_pvt++;
        }
@@@ -1150,13 -1135,11 +1150,13 @@@ static void blkfront_setup_discard(stru
        char *type;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
 +      unsigned int discard_secure;
  
        type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
        if (IS_ERR(type))
                return;
  
 +      info->feature_secdiscard = 0;
        if (strncmp(type, "phy", 3) == 0) {
                err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                        "discard-granularity", "%u", &discard_granularity,
                        info->discard_granularity = discard_granularity;
                        info->discard_alignment = discard_alignment;
                }
 +              err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
 +                          "discard-secure", "%d", &discard_secure,
 +                          NULL);
 +              if (!err)
 +                      info->feature_secdiscard = discard_secure;
 +
        } else if (strncmp(type, "file", 4) == 0)
                info->feature_discard = 1;
  
@@@ -1475,7 -1452,7 +1475,7 @@@ static int __init xlblk_init(void
        if (!xen_domain())
                return -ENODEV;
  
-       if (!xen_platform_pci_unplug)
+       if (xen_hvm_domain() && !xen_platform_pci_unplug)
                return -ENODEV;
  
        if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
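
Most of the churn above (req.id -> req.u.rw.id, nr_segments ->
u.rw.nr_segments) follows from the blkif ring request keeping its
per-operation fields in a union, where the discard flag overlaps the
read/write segment count - which is also why blkif_completion() is now
skipped for BLKIF_OP_DISCARD. A simplified sketch of the layout (abridged;
the authoritative definition lives in xen/interface/io/blkif.h):

	struct blkif_request {
		uint8_t operation;	/* BLKIF_OP_READ/WRITE/DISCARD/... */
		union {
			struct {
				uint8_t nr_segments;	/* rw segment count */
				/* ... handle, id, sector_number, seg[] ... */
			} rw;
			struct {
				uint8_t flag;	/* BLKIF_DISCARD_SECURE; shares
						   the byte with rw.nr_segments */
				/* ... id, sector_number, nr_sectors ... */
			} discard;
		} u;
	};
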
diff --combined drivers/net/xen-netfront.c
index 663b32c2e93185f60b391b2706cbc847e4c8ffd2,ccba19c72a364159db09dfa8368eb5a3770ecfd2..0ebbb1906c308ba69ff0cff0125a84562e85116d
@@@ -69,7 -69,7 +69,7 @@@ struct netfront_cb 
  
  #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
  #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
 -#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
 +#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
  
  struct netfront_stats {
        u64                     rx_packets;
@@@ -490,7 -490,6 +490,7 @@@ static int xennet_start_xmit(struct sk_
        int frags = skb_shinfo(skb)->nr_frags;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
 +      unsigned long flags;
  
        frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
        if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
                goto drop;
        }
  
 -      spin_lock_irq(&np->tx_lock);
 +      spin_lock_irqsave(&np->tx_lock, flags);
  
        if (unlikely(!netif_carrier_ok(dev) ||
                     (frags > 1 && !xennet_can_sg(dev)) ||
                     netif_needs_gso(skb, netif_skb_features(skb)))) {
 -              spin_unlock_irq(&np->tx_lock);
 +              spin_unlock_irqrestore(&np->tx_lock, flags);
                goto drop;
        }
  
        if (!netfront_tx_slot_available(np))
                netif_stop_queue(dev);
  
 -      spin_unlock_irq(&np->tx_lock);
 +      spin_unlock_irqrestore(&np->tx_lock, flags);
  
        return NETDEV_TX_OK;
  
@@@ -1230,33 -1229,6 +1230,33 @@@ static int xennet_set_features(struct n
        return 0;
  }
  
 +static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 +{
 +      struct net_device *dev = dev_id;
 +      struct netfront_info *np = netdev_priv(dev);
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&np->tx_lock, flags);
 +
 +      if (likely(netif_carrier_ok(dev))) {
 +              xennet_tx_buf_gc(dev);
 +              /* Under tx_lock: protects access to rx shared-ring indexes. */
 +              if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
 +                      napi_schedule(&np->napi);
 +      }
 +
 +      spin_unlock_irqrestore(&np->tx_lock, flags);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void xennet_poll_controller(struct net_device *dev)
 +{
 +      xennet_interrupt(0, dev);
 +}
 +#endif
 +
  static const struct net_device_ops xennet_netdev_ops = {
        .ndo_open            = xennet_open,
        .ndo_uninit          = xennet_uninit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_fix_features    = xennet_fix_features,
        .ndo_set_features    = xennet_set_features,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller = xennet_poll_controller,
 +#endif
  };
  
  static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
        struct netfront_info *np;
  
        netdev = alloc_etherdev(sizeof(struct netfront_info));
 -      if (!netdev) {
 -              printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
 -                     __func__);
 +      if (!netdev)
                return ERR_PTR(-ENOMEM);
 -      }
  
        np                   = netdev_priv(netdev);
        np->xbdev            = dev;
@@@ -1477,6 -1449,26 +1477,6 @@@ static int xen_net_read_mac(struct xenb
        return 0;
  }
  
 -static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 -{
 -      struct net_device *dev = dev_id;
 -      struct netfront_info *np = netdev_priv(dev);
 -      unsigned long flags;
 -
 -      spin_lock_irqsave(&np->tx_lock, flags);
 -
 -      if (likely(netif_carrier_ok(dev))) {
 -              xennet_tx_buf_gc(dev);
 -              /* Under tx_lock: protects access to rx shared-ring indexes. */
 -              if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
 -                      napi_schedule(&np->napi);
 -      }
 -
 -      spin_unlock_irqrestore(&np->tx_lock, flags);
 -
 -      return IRQ_HANDLED;
 -}
 -
  static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
  {
        struct xen_netif_tx_sring *txs;
@@@ -1965,7 -1957,7 +1965,7 @@@ static int __init netif_init(void
        if (xen_initial_domain())
                return 0;
  
-       if (!xen_platform_pci_unplug)
+       if (xen_hvm_domain() && !xen_platform_pci_unplug)
                return -ENODEV;
  
        printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
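
The spin_lock_irq() -> spin_lock_irqsave() switch in xennet_start_xmit pairs
with the new ndo_poll_controller hook: netpoll may call the transmit path
with interrupts already disabled, and an unconditional spin_unlock_irq()
would re-enable them behind the caller's back. A sketch of the offending
sequence (hypothetical caller, for illustration only):

	local_irq_save(flags);		/* netpoll context: IRQs are off */
	xennet_start_xmit(skb, dev);	/* spin_unlock_irq() here would have */
	local_irq_restore(flags);	/*   turned IRQs back on too early   */
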
diff --combined drivers/pci/xen-pcifront.c
index fd00ff02ab4d0c51cf7b6eabe6b2a5ce9880fb12,c18fab289ad56a57d974a5567bf03a4b4733dec8..d6cc62cb4cf7465fe22cb643fbde7a3f79f88a29
@@@ -189,7 -189,7 +189,7 @@@ static int pcifront_bus_read(struct pci
  
        if (verbose_request)
                dev_info(&pdev->xdev->dev,
 -                       "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
 +                       "read dev=%04x:%02x:%02x.%d - offset %x size %d\n",
                         pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
                         PCI_FUNC(devfn), where, size);
  
@@@ -228,7 -228,7 +228,7 @@@ static int pcifront_bus_write(struct pc
  
        if (verbose_request)
                dev_info(&pdev->xdev->dev,
 -                       "write dev=%04x:%02x:%02x.%01x - "
 +                       "write dev=%04x:%02x:%02x.%d - "
                         "offset %x size %d val %x\n",
                         pci_domain_nr(bus), bus->number,
                         PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
@@@ -290,6 -290,7 +290,7 @@@ static int pci_frontend_enable_msix(str
                } else {
                        printk(KERN_DEBUG "enable msix get value %x\n",
                                op.value);
+                       err = op.value;
                }
        } else {
                dev_err(&dev->dev, "enable msix get err %x\n", err);
@@@ -432,7 -433,7 +433,7 @@@ static int __devinit pcifront_scan_bus(
                d = pci_scan_single_device(b, devfn);
                if (d)
                        dev_info(&pdev->xdev->dev, "New device on "
 -                               "%04x:%02x:%02x.%02x found.\n", domain, bus,
 +                               "%04x:%02x:%02x.%d found.\n", domain, bus,
                                 PCI_SLOT(devfn), PCI_FUNC(devfn));
        }
  
@@@ -544,7 -545,7 +545,7 @@@ static void free_root_bus_devs(struct p
                dev = container_of(bus->devices.next, struct pci_dev,
                                   bus_list);
                dev_dbg(&dev->dev, "removing device\n");
 -              pci_remove_bus_device(dev);
 +              pci_stop_and_remove_bus_device(dev);
        }
  }
  
@@@ -593,7 -594,7 +594,7 @@@ static pci_ers_result_t pcifront_common
        }
        pdrv = pcidev->driver;
  
 -      if (get_driver(&pdrv->driver)) {
 +      if (pdrv) {
                if (pdrv->err_handler && pdrv->err_handler->error_detected) {
                        dev_dbg(&pcidev->dev,
                                "trying to call AER service\n");
                                }
                        }
                }
 -              put_driver(&pdrv->driver);
        }
        if (!flag)
                result = PCI_ERS_RESULT_NONE;
@@@ -1040,15 -1042,15 +1041,15 @@@ static int pcifront_detach_devices(stru
                pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
                if (!pci_dev) {
                        dev_dbg(&pdev->xdev->dev,
 -                              "Cannot get PCI device %04x:%02x:%02x.%02x\n",
 +                              "Cannot get PCI device %04x:%02x:%02x.%d\n",
                                domain, bus, slot, func);
                        continue;
                }
 -              pci_remove_bus_device(pci_dev);
 +              pci_stop_and_remove_bus_device(pci_dev);
                pci_dev_put(pci_dev);
  
                dev_dbg(&pdev->xdev->dev,
 -                      "PCI device %04x:%02x:%02x.%02x removed.\n",
 +                      "PCI device %04x:%02x:%02x.%d removed.\n",
                        domain, bus, slot, func);
        }