1 /*******************************************************************************
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2015 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *******************************************************************************/
/* On Hyper-V, to reset, we need to read from this offset
 * from the PCI config space. This is the mechanism used on
 * Hyper-V to support PF/VF communication.
 */
#define IXGBE_HV_RESET_OFFSET           0x201
37 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
38 * @hw: pointer to hardware structure
40 * Starts the hardware by filling the bus info structure and media type, clears
41 * all on chip counters, initializes receive address registers, multicast
42 * table, VLAN filter table, calls routine to set up link and flow control
43 * settings, and leaves transmit and receive units disabled and uninitialized
45 static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
47 /* Clear adapter stopped flag */
48 hw->adapter_stopped = false;
54 * ixgbevf_init_hw_vf - virtual function hardware initialization
55 * @hw: pointer to hardware structure
57 * Initialize the hardware by resetting the hardware and then starting
60 static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
62 s32 status = hw->mac.ops.start_hw(hw);
64 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
70 * ixgbevf_reset_hw_vf - Performs hardware reset
71 * @hw: pointer to hardware structure
73 * Resets the hardware by resetting the transmit and receive units, masks and
74 * clears all interrupts.
76 static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
78 struct ixgbe_mbx_info *mbx = &hw->mbx;
79 u32 timeout = IXGBE_VF_INIT_TIMEOUT;
80 s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
81 u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
82 u8 *addr = (u8 *)(&msgbuf[1]);
84 /* Call adapter stop to disable tx/rx and clear interrupts */
85 hw->mac.ops.stop_adapter(hw);
87 /* reset the api version */
88 hw->api_version = ixgbe_mbox_api_10;
90 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
91 IXGBE_WRITE_FLUSH(hw);
93 /* we cannot reset while the RSTI / RSTD bits are asserted */
94 while (!mbx->ops.check_for_rst(hw) && timeout) {
100 return IXGBE_ERR_RESET_FAILED;
102 /* mailbox timeout can now become active */
103 mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
105 msgbuf[0] = IXGBE_VF_RESET;
106 mbx->ops.write_posted(hw, msgbuf, 1);
110 /* set our "perm_addr" based on info provided by PF
111 * also set up the mc_filter_type which is piggy backed
112 * on the mac address in word 3
114 ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
118 /* New versions of the PF may NACK the reset return message
119 * to indicate that no MAC address has yet been assigned for
122 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
123 msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
124 return IXGBE_ERR_INVALID_MAC_ADDR;
126 if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
127 ether_addr_copy(hw->mac.perm_addr, addr);
129 hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
135 * Hyper-V variant; the VF/PF communication is through the PCI
138 static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
140 #if IS_ENABLED(CONFIG_PCI_MMCONFIG)
141 struct ixgbevf_adapter *adapter = hw->back;
144 for (i = 0; i < 6; i++)
145 pci_read_config_byte(adapter->pdev,
146 (i + IXGBE_HV_RESET_OFFSET),
147 &hw->mac.perm_addr[i]);
150 pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
156 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
157 * @hw: pointer to hardware structure
159 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
160 * disables transmit and receive units. The adapter_stopped flag is used by
161 * the shared code and drivers to determine if the adapter is in a stopped
162 * state and should not touch the hardware.
164 static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
166 u32 number_of_queues;
170 /* Set the adapter_stopped flag so other driver functions stop touching
173 hw->adapter_stopped = true;
175 /* Disable the receive unit by stopped each queue */
176 number_of_queues = hw->mac.max_rx_queues;
177 for (i = 0; i < number_of_queues; i++) {
178 reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
179 if (reg_val & IXGBE_RXDCTL_ENABLE) {
180 reg_val &= ~IXGBE_RXDCTL_ENABLE;
181 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
185 IXGBE_WRITE_FLUSH(hw);
187 /* Clear interrupt mask to stop from interrupts being generated */
188 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
190 /* Clear any pending interrupts */
191 IXGBE_READ_REG(hw, IXGBE_VTEICR);
193 /* Disable the transmit unit. Each queue must be disabled. */
194 number_of_queues = hw->mac.max_tx_queues;
195 for (i = 0; i < number_of_queues; i++) {
196 reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
197 if (reg_val & IXGBE_TXDCTL_ENABLE) {
198 reg_val &= ~IXGBE_TXDCTL_ENABLE;
199 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
207 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
208 * @hw: pointer to hardware structure
209 * @mc_addr: the multicast address
211 * Extracts the 12 bits, from a multicast address, to determine which
212 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
213 * incoming Rx multicast addresses, to determine the bit-vector to check in
214 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
215 * by the MO field of the MCSTCTRL. The MO field is set during initialization
218 static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
222 switch (hw->mac.mc_filter_type) {
223 case 0: /* use bits [47:36] of the address */
224 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
226 case 1: /* use bits [46:35] of the address */
227 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
229 case 2: /* use bits [45:34] of the address */
230 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
232 case 3: /* use bits [43:32] of the address */
233 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
235 default: /* Invalid mc_filter_type */
239 /* vector can only be 12-bits or boundary will be exceeded */
245 * ixgbevf_get_mac_addr_vf - Read device MAC address
246 * @hw: pointer to the HW structure
247 * @mac_addr: pointer to storage for retrieved MAC address
249 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
251 ether_addr_copy(mac_addr, hw->mac.perm_addr);
256 static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
258 struct ixgbe_mbx_info *mbx = &hw->mbx;
260 u8 *msg_addr = (u8 *)(&msgbuf[1]);
263 memset(msgbuf, 0, sizeof(msgbuf));
264 /* If index is one then this is the start of a new list and needs
265 * indication to the PF so it can do it's own list management.
266 * If it is zero then that tells the PF to just clear all of
267 * this VF's macvlans and there is no new list.
269 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
270 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
272 ether_addr_copy(msg_addr, addr);
273 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
276 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
278 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
282 (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
288 static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
294 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
295 * @adapter: pointer to the port handle
296 * @reta: buffer to fill with RETA contents.
297 * @num_rx_queues: Number of Rx queues configured for this port
299 * The "reta" buffer should be big enough to contain 32 registers.
301 * Returns: 0 on success.
302 * if API doesn't support this operation - (-EOPNOTSUPP).
304 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
307 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
308 u32 *hw_reta = &msgbuf[1];
311 /* We have to use a mailbox for 82599 and x540 devices only.
312 * For these devices RETA has 128 entries.
313 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
314 * 16 RETA entries in each DWORD giving 2 bits to each entry.
316 int dwords = IXGBEVF_82599_RETA_SIZE / 16;
318 /* We support the RSS querying for 82599 and x540 devices only.
319 * Thus return an error if API doesn't support RETA querying or querying
320 * is not supported for this device type.
322 if (hw->api_version != ixgbe_mbox_api_12 ||
323 hw->mac.type >= ixgbe_mac_X550_vf)
326 msgbuf[0] = IXGBE_VF_GET_RETA;
328 err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
333 err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
338 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
340 /* If the operation has been refused by a PF return -EPERM */
341 if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
344 /* If we didn't get an ACK there must have been
345 * some sort of mailbox error so we should treat it
348 if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
349 return IXGBE_ERR_MBX;
351 /* ixgbevf doesn't support more than 2 queues at the moment */
352 if (num_rx_queues > 1)
355 for (i = 0; i < dwords; i++)
356 for (j = 0; j < 16; j++)
357 reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
363 * ixgbevf_get_rss_key_locked - get the RSS Random Key
364 * @hw: pointer to the HW structure
365 * @rss_key: buffer to fill with RSS Hash Key contents.
367 * The "rss_key" buffer should be big enough to contain 10 registers.
369 * Returns: 0 on success.
370 * if API doesn't support this operation - (-EOPNOTSUPP).
372 int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
375 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
377 /* We currently support the RSS Random Key retrieval for 82599 and x540
380 * Thus return an error if API doesn't support RSS Random Key retrieval
381 * or if the operation is not supported for this device type.
383 if (hw->api_version != ixgbe_mbox_api_12 ||
384 hw->mac.type >= ixgbe_mac_X550_vf)
387 msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
388 err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
393 err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
398 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
400 /* If the operation has been refused by a PF return -EPERM */
401 if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
404 /* If we didn't get an ACK there must have been
405 * some sort of mailbox error so we should treat it
408 if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
409 return IXGBE_ERR_MBX;
411 memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
417 * ixgbevf_set_rar_vf - set device MAC address
418 * @hw: pointer to hardware structure
419 * @index: Receive address register to write
420 * @addr: Address to put into receive address register
421 * @vmdq: Unused in this implementation
423 static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
426 struct ixgbe_mbx_info *mbx = &hw->mbx;
428 u8 *msg_addr = (u8 *)(&msgbuf[1]);
431 memset(msgbuf, 0, sizeof(msgbuf));
432 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
433 ether_addr_copy(msg_addr, addr);
434 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
437 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
439 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
441 /* if nacked the address was rejected, use "perm_addr" */
443 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
444 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
445 return IXGBE_ERR_MBX;
452 * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
453 * @hw: pointer to hardware structure
454 * @index: Receive address register to write
455 * @addr: Address to put into receive address register
456 * @vmdq: Unused in this implementation
458 * We don't really allow setting the device MAC address. However,
459 * if the address being set is the permanent MAC address we will
462 static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
465 if (ether_addr_equal(addr, hw->mac.perm_addr))
471 static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
474 struct ixgbe_mbx_info *mbx = &hw->mbx;
475 u32 retmsg[IXGBE_VFMAILBOX_SIZE];
476 s32 retval = mbx->ops.write_posted(hw, msg, size);
479 mbx->ops.read_posted(hw, retmsg, size);
483 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
484 * @hw: pointer to the HW structure
485 * @netdev: pointer to net device structure
487 * Updates the Multicast Table Array.
489 static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
490 struct net_device *netdev)
492 struct netdev_hw_addr *ha;
493 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
494 u16 *vector_list = (u16 *)&msgbuf[1];
497 /* Each entry in the list uses 1 16 bit word. We have 30
498 * 16 bit words available in our HW msg buffer (minus 1 for the
499 * msg type). That's 30 hash values if we pack 'em right. If
500 * there are more than 30 MC addresses to add then punt the
501 * extras for now and then add code to handle more than 30 later.
502 * It would be unusual for a server to request that many multi-cast
503 * addresses except for in large enterprise network environments.
506 cnt = netdev_mc_count(netdev);
509 msgbuf[0] = IXGBE_VF_SET_MULTICAST;
510 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
513 netdev_for_each_mc_addr(ha, netdev) {
516 if (is_link_local_ether_addr(ha->addr))
519 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
522 ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
528 * Hyper-V variant - just a stub.
530 static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
531 struct net_device *netdev)
537 * ixgbevf_update_xcast_mode - Update Multicast mode
538 * @hw: pointer to the HW structure
539 * @netdev: pointer to net device structure
540 * @xcast_mode: new multicast mode
542 * Updates the Multicast Mode of VF.
544 static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
545 struct net_device *netdev, int xcast_mode)
547 struct ixgbe_mbx_info *mbx = &hw->mbx;
551 switch (hw->api_version) {
552 case ixgbe_mbox_api_12:
558 msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
559 msgbuf[1] = xcast_mode;
561 err = mbx->ops.write_posted(hw, msgbuf, 2);
565 err = mbx->ops.read_posted(hw, msgbuf, 2);
569 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
570 if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
577 * Hyper-V variant - just a stub.
579 static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw,
580 struct net_device *netdev,
587 * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
588 * @hw: pointer to the HW structure
589 * @vlan: 12 bit VLAN ID
590 * @vind: unused by VF drivers
591 * @vlan_on: if true then set bit, else clear bit
593 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
596 struct ixgbe_mbx_info *mbx = &hw->mbx;
600 msgbuf[0] = IXGBE_VF_SET_VLAN;
602 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
603 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
605 err = mbx->ops.write_posted(hw, msgbuf, 2);
609 err = mbx->ops.read_posted(hw, msgbuf, 2);
613 /* remove extra bits from the message */
614 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
615 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
617 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
618 err = IXGBE_ERR_INVALID_ARGUMENT;
625 * Hyper-V variant - just a stub.
627 static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
634 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
635 * @hw: pointer to hardware structure
636 * @speed: Unused in this implementation
637 * @autoneg: Unused in this implementation
638 * @autoneg_wait_to_complete: Unused in this implementation
640 * Do nothing and return success. VF drivers are not allowed to change
641 * global settings. Maintained for driver compatibility.
643 static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
644 ixgbe_link_speed speed, bool autoneg,
645 bool autoneg_wait_to_complete)
651 * ixgbevf_check_mac_link_vf - Get link/speed status
652 * @hw: pointer to hardware structure
653 * @speed: pointer to link speed
654 * @link_up: true is link is up, false otherwise
655 * @autoneg_wait_to_complete: true when waiting for completion is needed
657 * Reads the links register to determine if link is up and the current speed
659 static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
660 ixgbe_link_speed *speed,
662 bool autoneg_wait_to_complete)
664 struct ixgbe_mbx_info *mbx = &hw->mbx;
665 struct ixgbe_mac_info *mac = &hw->mac;
670 /* If we were hit with a reset drop the link */
671 if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
672 mac->get_link_status = true;
674 if (!mac->get_link_status)
677 /* if link status is down no point in checking to see if pf is up */
678 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
679 if (!(links_reg & IXGBE_LINKS_UP))
682 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
683 * before the link status is correct
685 if (mac->type == ixgbe_mac_82599_vf) {
688 for (i = 0; i < 5; i++) {
690 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
692 if (!(links_reg & IXGBE_LINKS_UP))
697 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
698 case IXGBE_LINKS_SPEED_10G_82599:
699 *speed = IXGBE_LINK_SPEED_10GB_FULL;
701 case IXGBE_LINKS_SPEED_1G_82599:
702 *speed = IXGBE_LINK_SPEED_1GB_FULL;
704 case IXGBE_LINKS_SPEED_100_82599:
705 *speed = IXGBE_LINK_SPEED_100_FULL;
709 /* if the read failed it could just be a mailbox collision, best wait
710 * until we are called again and don't report an error
712 if (mbx->ops.read(hw, &in_msg, 1))
715 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
716 /* msg is not CTS and is NACK we must have lost CTS status */
717 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
722 /* the pf is talking, if we timed out in the past we reinit */
728 /* if we passed all the tests above then the link is up and we no
729 * longer need to check for link
731 mac->get_link_status = false;
734 *link_up = !mac->get_link_status;
739 * Hyper-V variant; there is no mailbox communication.
741 static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
742 ixgbe_link_speed *speed,
744 bool autoneg_wait_to_complete)
746 struct ixgbe_mbx_info *mbx = &hw->mbx;
747 struct ixgbe_mac_info *mac = &hw->mac;
750 /* If we were hit with a reset drop the link */
751 if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
752 mac->get_link_status = true;
754 if (!mac->get_link_status)
757 /* if link status is down no point in checking to see if pf is up */
758 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
759 if (!(links_reg & IXGBE_LINKS_UP))
762 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
763 * before the link status is correct
765 if (mac->type == ixgbe_mac_82599_vf) {
768 for (i = 0; i < 5; i++) {
770 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
772 if (!(links_reg & IXGBE_LINKS_UP))
777 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
778 case IXGBE_LINKS_SPEED_10G_82599:
779 *speed = IXGBE_LINK_SPEED_10GB_FULL;
781 case IXGBE_LINKS_SPEED_1G_82599:
782 *speed = IXGBE_LINK_SPEED_1GB_FULL;
784 case IXGBE_LINKS_SPEED_100_82599:
785 *speed = IXGBE_LINK_SPEED_100_FULL;
789 /* if we passed all the tests above then the link is up and we no
790 * longer need to check for link
792 mac->get_link_status = false;
795 *link_up = !mac->get_link_status;
800 * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
801 * @hw: pointer to the HW structure
802 * @max_size: value to assign to max frame size
804 static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
808 msgbuf[0] = IXGBE_VF_SET_LPE;
809 msgbuf[1] = max_size;
810 ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
814 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
815 * @hw: pointer to the HW structure
816 * @max_size: value to assign to max frame size
819 static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
823 /* If we are on Hyper-V, we implement this functionality
826 reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
828 reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
829 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
833 * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
834 * @hw: pointer to the HW structure
835 * @api: integer containing requested API version
837 static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
842 /* Negotiate the mailbox API version */
843 msg[0] = IXGBE_VF_API_NEGOTIATE;
846 err = hw->mbx.ops.write_posted(hw, msg, 3);
849 err = hw->mbx.ops.read_posted(hw, msg, 3);
852 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
854 /* Store value and return 0 on success */
855 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
856 hw->api_version = api;
860 err = IXGBE_ERR_INVALID_ARGUMENT;
867 * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
868 * @hw: pointer to the HW structure
869 * @api: integer containing requested API version
870 * Hyper-V version - only ixgbe_mbox_api_10 supported.
872 static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
874 /* Hyper-V only supports api version ixgbe_mbox_api_10 */
875 if (api != ixgbe_mbox_api_10)
876 return IXGBE_ERR_INVALID_ARGUMENT;
881 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
882 unsigned int *default_tc)
887 /* do nothing if API doesn't support ixgbevf_get_queues */
888 switch (hw->api_version) {
889 case ixgbe_mbox_api_11:
890 case ixgbe_mbox_api_12:
896 /* Fetch queue configuration from the PF */
897 msg[0] = IXGBE_VF_GET_QUEUE;
898 msg[1] = msg[2] = msg[3] = msg[4] = 0;
899 err = hw->mbx.ops.write_posted(hw, msg, 5);
902 err = hw->mbx.ops.read_posted(hw, msg, 5);
905 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
907 /* if we we didn't get an ACK there must have been
908 * some sort of mailbox error so we should treat it
911 if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
912 return IXGBE_ERR_MBX;
914 /* record and validate values from message */
915 hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
916 if (hw->mac.max_tx_queues == 0 ||
917 hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
918 hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
920 hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
921 if (hw->mac.max_rx_queues == 0 ||
922 hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
923 hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
925 *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
926 /* in case of unknown state assume we cannot tag frames */
927 if (*num_tcs > hw->mac.max_rx_queues)
930 *default_tc = msg[IXGBE_VF_DEF_QUEUE];
931 /* default to queue 0 on out-of-bounds queue number */
932 if (*default_tc >= hw->mac.max_tx_queues)
939 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
940 .init_hw = ixgbevf_init_hw_vf,
941 .reset_hw = ixgbevf_reset_hw_vf,
942 .start_hw = ixgbevf_start_hw_vf,
943 .get_mac_addr = ixgbevf_get_mac_addr_vf,
944 .stop_adapter = ixgbevf_stop_hw_vf,
945 .setup_link = ixgbevf_setup_mac_link_vf,
946 .check_link = ixgbevf_check_mac_link_vf,
947 .negotiate_api_version = ixgbevf_negotiate_api_version_vf,
948 .set_rar = ixgbevf_set_rar_vf,
949 .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
950 .update_xcast_mode = ixgbevf_update_xcast_mode,
951 .set_uc_addr = ixgbevf_set_uc_addr_vf,
952 .set_vfta = ixgbevf_set_vfta_vf,
953 .set_rlpml = ixgbevf_set_rlpml_vf,
956 static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
957 .init_hw = ixgbevf_init_hw_vf,
958 .reset_hw = ixgbevf_hv_reset_hw_vf,
959 .start_hw = ixgbevf_start_hw_vf,
960 .get_mac_addr = ixgbevf_get_mac_addr_vf,
961 .stop_adapter = ixgbevf_stop_hw_vf,
962 .setup_link = ixgbevf_setup_mac_link_vf,
963 .check_link = ixgbevf_hv_check_mac_link_vf,
964 .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
965 .set_rar = ixgbevf_hv_set_rar_vf,
966 .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
967 .update_xcast_mode = ixgbevf_hv_update_xcast_mode,
968 .set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
969 .set_vfta = ixgbevf_hv_set_vfta_vf,
970 .set_rlpml = ixgbevf_hv_set_rlpml_vf,
973 const struct ixgbevf_info ixgbevf_82599_vf_info = {
974 .mac = ixgbe_mac_82599_vf,
975 .mac_ops = &ixgbevf_mac_ops,
978 const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
979 .mac = ixgbe_mac_82599_vf,
980 .mac_ops = &ixgbevf_hv_mac_ops,
983 const struct ixgbevf_info ixgbevf_X540_vf_info = {
984 .mac = ixgbe_mac_X540_vf,
985 .mac_ops = &ixgbevf_mac_ops,
988 const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
989 .mac = ixgbe_mac_X540_vf,
990 .mac_ops = &ixgbevf_hv_mac_ops,
993 const struct ixgbevf_info ixgbevf_X550_vf_info = {
994 .mac = ixgbe_mac_X550_vf,
995 .mac_ops = &ixgbevf_mac_ops,
998 const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
999 .mac = ixgbe_mac_X550_vf,
1000 .mac_ops = &ixgbevf_hv_mac_ops,
1003 const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
1004 .mac = ixgbe_mac_X550EM_x_vf,
1005 .mac_ops = &ixgbevf_mac_ops,
1008 const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
1009 .mac = ixgbe_mac_X550EM_x_vf,
1010 .mac_ops = &ixgbevf_hv_mac_ops,