git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 15 Dec 2016 20:03:32 +0000 (12:03 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 15 Dec 2016 20:03:32 +0000 (12:03 -0800)
Pull rdma updates from Doug Ledford:
 "This is the complete update for the rdma stack for this release cycle.

  Most of it is typical driver and core updates, but there is the
  entirely new VMware pvrdma driver. You may have noticed that there
  were changes in DaveM's pull request to the bnxt Ethernet driver to
  support a RoCE RDMA driver. The bnxt_re driver was tentatively set to
  be pulled in this release cycle, but it simply wasn't ready in time
  and was dropped (a few review comments still to address, and some
  multi-arch build issues like prefetch() not working across all
  arches).

  Summary:

   - shared mlx5 updates with net stack (will drop out on merge if
     Dave's tree has already been merged)

   - driver updates: cxgb4, hfi1, hns-roce, i40iw, mlx4, mlx5, qedr, rxe

   - debug cleanups

   - new connection rejection helpers

   - SRP updates

   - various misc fixes

   - new paravirt driver from vmware"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (210 commits)
  IB: Add vmw_pvrdma driver
  IB/mlx4: fix improper return value
  IB/ocrdma: fix bad initialization
  infiniband: nes: return value of skb_linearize should be handled
  MAINTAINERS: Update Intel RDMA RNIC driver maintainers
  MAINTAINERS: Remove Mitesh Ahuja from emulex maintainers
  IB/core: fix unmap_sg argument
  qede: fix general protection fault may occur on probe
  IB/mthca: Replace pci_pool_alloc by pci_pool_zalloc
  mlx5, calc_sq_size(): Make a debug message more informative
  mlx5: Remove a set-but-not-used variable
  mlx5: Use { } instead of { 0 } to init struct
  IB/srp: Make writing the add_target sysfs attr interruptible
  IB/srp: Make mapping failures easier to debug
  IB/srp: Make login failures easier to debug
  IB/srp: Introduce a local variable in srp_add_one()
  IB/srp: Fix CONFIG_DYNAMIC_DEBUG=n build
  IB/multicast: Check ib_find_pkey() return value
  IPoIB: Avoid reading an uninitialized member variable
  IB/mad: Fix an array index check
  ...

35 files changed:
1  2 
MAINTAINERS
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/roce_gid_mgmt.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/affinity.h
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/driver.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/mlx4/ah.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/nvme/host/rdma.c
drivers/nvme/target/rdma.c
include/linux/mlx5/mlx5_ifc.h
include/linux/pci_ids.h

diff --cc MAINTAINERS
Simple merge
Simple merge
Simple merge
Simple merge
index 3a64a0881882eb98a033d3a82296ad9b36ac600d,c86ddcea76757a9660fa2e3a3b25ecdb5e324cec..0621f4455732d83496b1dde0201912985f1ff8de
@@@ -437,28 -434,6 +434,26 @@@ static void callback_for_addr_gid_devic
                          &parsed->gid_attr);
  }
  
-       if (!entry) {
-               pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
 +struct upper_list {
 +      struct list_head list;
 +      struct net_device *upper;
 +};
 +
 +static int netdev_upper_walk(struct net_device *upper, void *data)
 +{
 +      struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 +      struct list_head *upper_list = data;
 +
-       }
++      if (!entry)
 +              return 0;
 +
 +      list_add_tail(&entry->list, upper_list);
 +      dev_hold(upper);
 +      entry->upper = upper;
 +
 +      return 0;
 +}
 +
  static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index cc87fd4e534bbefd1235be4c010c5df4be9c065c,4163596ce4c91a34d83a5f7e51454b8b84c892ba..751a0fb29fa57bb6546dc483496fa5ff309517de
@@@ -1112,15 -1126,17 +1102,17 @@@ struct hfi1_devdata 
        u64 sw_cce_err_status_aggregate;
        /* Software counter that aggregates all bypass packet rcv errors */
        u64 sw_rcv_bypass_packet_errors;
-       /* receive interrupt functions */
-       rhf_rcv_function_ptr *rhf_rcv_function_map;
+       /* receive interrupt function */
        rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
  
+       /* Save the enabled LCB error bits */
+       u64 lcb_err_en;
        /*
 -       * Handlers for outgoing data so that snoop/capture does not
 -       * have to have its hooks in the send path
 +       * Capability to have different send engines simply by changing a
 +       * pointer value.
         */
-       send_routine process_pio_send;
+       send_routine process_pio_send ____cacheline_aligned_in_smp;
        send_routine process_dma_send;
        void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
                                u64 pbc, const void *from, size_t count);
        spinlock_t aspm_lock;
        /* Number of verbs contexts which have disabled ASPM */
        atomic_t aspm_disabled_cnt;
 +      /* Keeps track of user space clients */
 +      atomic_t user_refcount;
 +      /* Used to wait for outstanding user space clients before dev removal */
 +      struct completion user_comp;
  
-       struct hfi1_affinity *affinity;
+       bool eprom_available;   /* true if EPROM is available for this device */
+       bool aspm_supported;    /* Does HW support ASPM */
+       bool aspm_enabled;      /* ASPM state: enabled/disabled */
        struct rhashtable sdma_rht;
        struct kobject kobj;
  };
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 2be65ddf56ba8ba311cce5497258caf030bfc47a,b81736d625fc554cb70ea3da28bb389e85a78f93..d566f673883348f58a2d9b0672313aa5560af80c
@@@ -3128,9 -3217,9 +3220,9 @@@ static void *mlx5_ib_add(struct mlx5_co
        spin_lock_init(&dev->reset_flow_resource_lock);
  
        if (ll == IB_LINK_LAYER_ETHERNET) {
-               err = mlx5_enable_roce(dev);
+               err = mlx5_enable_eth(dev);
                if (err)
 -                      goto err_dealloc;
 +                      goto err_free_port;
        }
  
        err = create_dev_resources(&dev->devr);
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 22bd9630dcd924315012019ff0a39242d7476a59,b246653cf713e7fd5b5d507426d308e55ea3bf9c..73d4a97603a1ded959b115072341911120e17da9
@@@ -696,8 -698,7 +698,9 @@@ next_wqe
                                                       qp->req.wqe_index);
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
 -                      goto complete;
 +                      __rxe_do_task(&qp->comp.task);
++                      rxe_drop_ref(qp);
 +                      return 0;
                }
                payload = mtu;
        }
@@@ -746,18 -747,15 +749,17 @@@ err
        wqe->status = IB_WC_LOC_PROT_ERR;
        wqe->state = wqe_state_error;
  
 -complete:
 -      if (qp_type(qp) != IB_QPT_RC) {
 -              while (rxe_completer(qp) == 0)
 -                      ;
 -      }
 -      rxe_drop_ref(qp);
 -      return 0;
 -
 +      /*
 +       * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 +       * ---------8<---------8<-------------
 +       * ...Note that if a completion error occurs, a Work Completion
 +       * will always be generated, even if the signaling
 +       * indicator requests an Unsignaled Completion.
 +       * ---------8<---------8<-------------
 +       */
 +      wqe->wr.send_flags |= IB_SEND_SIGNALED;
 +      __rxe_do_task(&qp->comp.task);
-       return -EAGAIN;
  exit:
+       rxe_drop_ref(qp);
        return -EAGAIN;
  }
Simple merge
Simple merge
Simple merge
Simple merge