net: txgbe: fix to control VLAN strip
When VLAN tag stripping is enabled or disabled, the hardware requires the
Rx ring to be in a disabled state; otherwise the feature cannot be
changed.
Fixes: f3b03c655f ("net: wangxun: Implement vlan add and kill functions")
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Commit 1d3c641495, parent ac71ab7816, committed by David S. Miller.
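The diff below touches the shared libwx core (wx_sw_init() in wx_hw.c, wx_set_features() in wx_lib.c, and wx_type.h) as well as ngbe_ethtool.c, txgbe_ethtool.c, txgbe_main.c and txgbe_type.h. It introduces a WX_STATE_RESETTING bit that serializes every path which tears the Rx rings down: the ethtool ring-resize handlers now take the bit through the new wx_set_state_reset() helper, and on txgbe a toggle of Rx VLAN tag stripping triggers a full down/up cycle via the new wx->do_reset() callback, so the strip setting is programmed while the rings are disabled.

Below is a minimal userspace sketch of that serialization pattern, not driver code: the function names mirror the diff, but the kernel bit helpers, usleep_range() and the actual hardware reprogramming are stand-ins (C11 atomics, usleep() and a printf placeholder).

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct wx_demo {
	atomic_flag resetting;          /* stands in for WX_STATE_RESETTING */
};

/* Mirrors wx_set_state_reset(): poll for the flag, give up after ~50 tries. */
static int wx_demo_set_state_reset(struct wx_demo *wx)
{
	int timeout = 50;

	while (atomic_flag_test_and_set(&wx->resetting)) {
		if (!--timeout)
			return -1;      /* the kernel helper returns -EBUSY */
		usleep(1000);
	}
	return 0;
}

/* Mirrors txgbe_do_reset()/txgbe_reinit_locked(): down/up under the flag. */
static void wx_demo_do_reset(struct wx_demo *wx)
{
	if (wx_demo_set_state_reset(wx)) {
		fprintf(stderr, "wait device reset timeout\n");
		return;
	}
	puts("rings disabled, VLAN strip reprogrammed, rings re-enabled");
	atomic_flag_clear(&wx->resetting);
}

int main(void)
{
	struct wx_demo wx = { .resetting = ATOMIC_FLAG_INIT };

	wx_demo_do_reset(&wx);  /* roughly what toggling Rx VLAN offload ends up doing */
	return 0;
}

In the driver itself the flag lives in wx->state, is taken with test_and_set_bit() and is released with clear_bit() at the clear_reset: labels added to ngbe_set_ringparam()/txgbe_set_ringparam() and at the end of txgbe_reinit_locked().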
@@ -1958,6 +1958,8 @@ int wx_sw_init(struct wx *wx)
 		return -ENOMEM;
 	}
 
+	bitmap_zero(wx->state, WX_STATE_NBITS);
+
 	return 0;
 }
 EXPORT_SYMBOL(wx_sw_init);
@@ -2692,9 +2692,9 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
 
 	netdev->features = features;
 
-	if (changed &
-	    (NETIF_F_HW_VLAN_CTAG_RX |
-	     NETIF_F_HW_VLAN_STAG_RX))
+	if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+		wx->do_reset(netdev);
+	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
 		wx_set_rx_mode(netdev);
 
 	return 0;
@@ -982,8 +982,13 @@ struct wx_hw_stats {
 	u64 qmprc;
 };
 
+enum wx_state {
+	WX_STATE_RESETTING,
+	WX_STATE_NBITS, /* must be last */
+};
 struct wx {
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	DECLARE_BITMAP(state, WX_STATE_NBITS);
 
 	void *priv;
 	u8 __iomem *hw_addr;
@@ -1071,6 +1076,8 @@ struct wx {
 	u64 hw_csum_rx_good;
 	u64 hw_csum_rx_error;
 	u64 alloc_rx_buff_failed;
+
+	void (*do_reset)(struct net_device *netdev);
 };
 
 #define WX_INTR_ALL (~0ULL)
@@ -1131,4 +1138,19 @@ static inline struct wx *phylink_to_wx(struct phylink_config *config)
 	return container_of(config, struct wx, phylink_config);
 }
 
+static inline int wx_set_state_reset(struct wx *wx)
+{
+	u8 timeout = 50;
+
+	while (test_and_set_bit(WX_STATE_RESETTING, wx->state)) {
+		timeout--;
+		if (!timeout)
+			return -EBUSY;
+
+		usleep_range(1000, 2000);
+	}
+
+	return 0;
+}
+
 #endif /* _WX_TYPE_H_ */
@@ -52,7 +52,7 @@ static int ngbe_set_ringparam(struct net_device *netdev,
 	struct wx *wx = netdev_priv(netdev);
 	u32 new_rx_count, new_tx_count;
 	struct wx_ring *temp_ring;
-	int i;
+	int i, err = 0;
 
 	new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
 	new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -64,6 +64,10 @@ static int ngbe_set_ringparam(struct net_device *netdev,
 	    new_rx_count == wx->rx_ring_count)
 		return 0;
 
+	err = wx_set_state_reset(wx);
+	if (err)
+		return err;
+
 	if (!netif_running(wx->netdev)) {
 		for (i = 0; i < wx->num_tx_queues; i++)
 			wx->tx_ring[i]->count = new_tx_count;
@@ -72,14 +76,16 @@ static int ngbe_set_ringparam(struct net_device *netdev,
 		wx->tx_ring_count = new_tx_count;
 		wx->rx_ring_count = new_rx_count;
 
-		return 0;
+		goto clear_reset;
 	}
 
 	/* allocate temporary buffer to store rings in */
 	i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
 	temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
-	if (!temp_ring)
-		return -ENOMEM;
+	if (!temp_ring) {
+		err = -ENOMEM;
+		goto clear_reset;
+	}
 
 	ngbe_down(wx);
 
@@ -89,7 +95,9 @@ static int ngbe_set_ringparam(struct net_device *netdev,
 	wx_configure(wx);
 	ngbe_up(wx);
 
-	return 0;
+clear_reset:
+	clear_bit(WX_STATE_RESETTING, wx->state);
+	return err;
 }
 
 static int ngbe_set_channels(struct net_device *dev,
@@ -19,7 +19,7 @@ static int txgbe_set_ringparam(struct net_device *netdev,
 	struct wx *wx = netdev_priv(netdev);
 	u32 new_rx_count, new_tx_count;
 	struct wx_ring *temp_ring;
-	int i;
+	int i, err = 0;
 
 	new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
 	new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -31,6 +31,10 @@ static int txgbe_set_ringparam(struct net_device *netdev,
 	    new_rx_count == wx->rx_ring_count)
 		return 0;
 
+	err = wx_set_state_reset(wx);
+	if (err)
+		return err;
+
 	if (!netif_running(wx->netdev)) {
 		for (i = 0; i < wx->num_tx_queues; i++)
 			wx->tx_ring[i]->count = new_tx_count;
@@ -39,14 +43,16 @@ static int txgbe_set_ringparam(struct net_device *netdev,
 		wx->tx_ring_count = new_tx_count;
 		wx->rx_ring_count = new_rx_count;
 
-		return 0;
+		goto clear_reset;
 	}
 
 	/* allocate temporary buffer to store rings in */
 	i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
 	temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
-	if (!temp_ring)
-		return -ENOMEM;
+	if (!temp_ring) {
+		err = -ENOMEM;
+		goto clear_reset;
+	}
 
 	txgbe_down(wx);
 
@@ -55,7 +61,9 @@ static int txgbe_set_ringparam(struct net_device *netdev,
 
 	txgbe_up(wx);
 
-	return 0;
+clear_reset:
+	clear_bit(WX_STATE_RESETTING, wx->state);
+	return err;
 }
 
 static int txgbe_set_channels(struct net_device *dev,
@@ -269,6 +269,8 @@ static int txgbe_sw_init(struct wx *wx)
 	wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
 	wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
 
+	wx->do_reset = txgbe_do_reset;
+
 	return 0;
 }
 
@@ -421,6 +423,34 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
 	return 0;
 }
 
+static void txgbe_reinit_locked(struct wx *wx)
+{
+	int err = 0;
+
+	netif_trans_update(wx->netdev);
+
+	err = wx_set_state_reset(wx);
+	if (err) {
+		wx_err(wx, "wait device reset timeout\n");
+		return;
+	}
+
+	txgbe_down(wx);
+	txgbe_up(wx);
+
+	clear_bit(WX_STATE_RESETTING, wx->state);
+}
+
+void txgbe_do_reset(struct net_device *netdev)
+{
+	struct wx *wx = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		txgbe_reinit_locked(wx);
+	else
+		txgbe_reset(wx);
+}
+
 static const struct net_device_ops txgbe_netdev_ops = {
 	.ndo_open = txgbe_open,
 	.ndo_stop = txgbe_close,
@@ -134,6 +134,7 @@ extern char txgbe_driver_name[];
 void txgbe_down(struct wx *wx);
 void txgbe_up(struct wx *wx);
 int txgbe_setup_tc(struct net_device *dev, u8 tc);
+void txgbe_do_reset(struct net_device *netdev);
 
 #define NODE_PROP(_NAME, _PROP) \
 	(const struct software_node) { \
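Usage note (device name illustrative): these paths are reached from userspace by toggling Rx VLAN tag stripping, e.g. "ethtool -K eth0 rxvlan off", or by resizing the descriptor rings with "ethtool -G eth0 rx 1024 tx 1024". With this patch both operations first claim WX_STATE_RESETTING, and on txgbe the offload toggle goes through txgbe_down()/txgbe_up(), so the strip setting is applied while the Rx rings are disabled.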