Merge tag 'net-6.18-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from bluetooth and CAN. No known outstanding
  regressions.

  Current release - regressions:

   - mptcp: initialize rcv_mss before calling tcp_send_active_reset()

   - eth: mlx5e: fix validation logic in rate limiting

  Previous releases - regressions:

   - xsk: avoid data corruption on cq descriptor number

   - bluetooth:
       - prevent race in socket write iter and sock bind
       - fix not generating MacKey and LTK when re-pairing

   - can:
       - kvaser_usb: fix potential infinite loop in command parsers
       - rcar_canfd: fix CAN-FD mode as default

   - eth:
       - veth: reduce XDP no_direct return section to fix race
       - virtio-net: avoid unnecessary checksum calculation on guest RX

  Previous releases - always broken:

   - sched: fix TCF_LAYER_TRANSPORT handling in tcf_get_base_ptr()

   - bluetooth: mediatek: fix kernel crash when releasing iso interface

   - vhost: rewind next_avail_head while discarding descriptors

   - eth:
       - r8169: fix RTL8127 hang on suspend/shutdown
       - aquantia: add missing descriptor cache invalidation on ATL2

   - dsa: microchip: fix resource releases in error path"

* tag 'net-6.18-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (47 commits)
  mptcp: Initialise rcv_mss before calling tcp_send_active_reset() in mptcp_do_fastclose().
  net: fec: do not register PPS event for PEROUT
  net: fec: do not allow enabling PPS and PEROUT simultaneously
  net: fec: do not update PEROUT if it is enabled
  net: fec: cancel perout_timer when PEROUT is disabled
  net: mctp: unconditionally set skb->dev on dst output
  net: atlantic: fix fragment overflow handling in RX path
  MAINTAINERS: separate VIRTIO NET DRIVER and add netdev
  virtio-net: avoid unnecessary checksum calculation on guest RX
  eth: fbnic: Fix counter roll-over issue
  mptcp: clear scheduled subflows on retransmit
  net: dsa: sja1105: fix SGMII linking at 10M or 100M but not passing traffic
  s390/net: list Aswin Karuvally as maintainer
  net: wwan: mhi: Keep modem name match with Foxconn T99W640
  vhost: rewind next_avail_head while discarding descriptors
  net/sched: em_canid: fix uninit-value in em_canid_match
  can: rcar_canfd: Fix CAN-FD mode as default
  xsk: avoid data corruption on cq descriptor number
  r8169: fix RTL8127 hang on suspend/shutdown
  net: sxgbe: fix potential NULL dereference in sxgbe_rx()
  ...
Merged by Linus Torvalds on 2025-11-27 09:18:40 -08:00
49 changed files with 701 additions and 354 deletions


@@ -22655,7 +22655,7 @@ F: arch/s390/mm
S390 NETWORK DRIVERS
M: Alexandra Winter <wintera@linux.ibm.com>
R: Aswin Karuvally <aswin@linux.ibm.com>
M: Aswin Karuvally <aswin@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -27122,7 +27122,7 @@ S: Maintained
F: drivers/char/virtio_console.c
F: include/uapi/linux/virtio_console.h
VIRTIO CORE AND NET DRIVERS
VIRTIO CORE
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
@@ -27135,7 +27135,6 @@ F: Documentation/devicetree/bindings/virtio/
F: Documentation/driver-api/virtio/
F: drivers/block/virtio_blk.c
F: drivers/crypto/virtio/
F: drivers/net/virtio_net.c
F: drivers/vdpa/
F: drivers/virtio/
F: include/linux/vdpa.h
@@ -27144,7 +27143,6 @@ F: include/linux/vringh.h
F: include/uapi/linux/virtio_*.h
F: net/vmw_vsock/virtio*
F: tools/virtio/
F: tools/testing/selftests/drivers/net/virtio_net/
VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
@@ -27256,6 +27254,19 @@ W: https://virtio-mem.gitlab.io/
F: drivers/virtio/virtio_mem.c
F: include/uapi/linux/virtio_mem.h
VIRTIO NET DRIVER
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
R: Eugenio Pérez <eperezma@redhat.com>
L: netdev@vger.kernel.org
L: virtualization@lists.linux.dev
S: Maintained
F: drivers/net/virtio_net.c
F: include/linux/virtio_net.h
F: include/uapi/linux/virtio_net.h
F: tools/testing/selftests/drivers/net/virtio_net/
VIRTIO PMEM DRIVER
M: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
L: virtualization@lists.linux.dev


@@ -1374,7 +1374,9 @@ fore200e_open(struct atm_vcc *vcc)
vcc->dev_data = NULL;
mutex_lock(&fore200e->rate_mtx);
fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
mutex_unlock(&fore200e->rate_mtx);
kfree(fore200e_vcc);
return -EINVAL;


@@ -2711,9 +2711,21 @@ static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
{
struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
struct btmtk_data *btmtk_data;
int err;
if (!data->hdev)
return;
btmtk_data = hci_get_priv(data->hdev);
if (!btmtk_data)
return;
if (!btmtk_data->isopkt_intf) {
bt_dev_err(data->hdev, "Can't claim NULL iso interface");
return;
}
/*
* The function usb_driver_claim_interface() is documented to need
* locks held if it's not called from a probe routine. The code here
@@ -2735,17 +2747,30 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
{
struct btmtk_data *btmtk_data = hci_get_priv(hdev);
struct btmtk_data *btmtk_data;
if (!hdev)
return;
btmtk_data = hci_get_priv(hdev);
if (!btmtk_data)
return;
if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
dev_kfree_skb_irq(btmtk_data->isopkt_skb);
btmtk_data->isopkt_skb = NULL;
usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
usb_driver_release_interface(&btusb_driver,
btmtk_data->isopkt_intf);
if (btmtk_data->isopkt_skb) {
dev_kfree_skb_irq(btmtk_data->isopkt_skb);
btmtk_data->isopkt_skb = NULL;
}
if (btmtk_data->isopkt_intf) {
usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
usb_driver_release_interface(&btusb_driver,
btmtk_data->isopkt_intf);
btmtk_data->isopkt_intf = NULL;
}
}
clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);


@@ -709,6 +709,11 @@ static void rcar_canfd_set_bit_reg(void __iomem *addr, u32 val)
rcar_canfd_update(val, val, addr);
}
static void rcar_canfd_clear_bit_reg(void __iomem *addr, u32 val)
{
rcar_canfd_update(val, 0, addr);
}
static void rcar_canfd_update_bit_reg(void __iomem *addr, u32 mask, u32 val)
{
rcar_canfd_update(mask, val, addr);
@@ -755,25 +760,6 @@ static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch,
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc);
}
static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
{
if (gpriv->info->ch_interface_mode) {
u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
: RCANFD_GEN4_FDCFG_CLOE;
for_each_set_bit(ch, &gpriv->channels_mask,
gpriv->info->max_channels)
rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg, val);
} else {
if (gpriv->fdmode)
rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
RCANFD_GRMCFG_RCMC);
else
rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
RCANFD_GRMCFG_RCMC);
}
}
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
struct device *dev = &gpriv->pdev->dev;
@@ -806,6 +792,16 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
/* Reset Global error flags */
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
/* Set the controller into appropriate mode */
if (!gpriv->info->ch_interface_mode) {
if (gpriv->fdmode)
rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
RCANFD_GRMCFG_RCMC);
else
rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
RCANFD_GRMCFG_RCMC);
}
/* Transition all Channels to reset mode */
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_clear_bit(gpriv->base,
@@ -823,10 +819,23 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
dev_dbg(dev, "channel %u reset failed\n", ch);
return err;
}
}
/* Set the controller into appropriate mode */
rcar_canfd_set_mode(gpriv);
/* Set the controller into appropriate mode */
if (gpriv->info->ch_interface_mode) {
/* Do not set CLOE and FDOE simultaneously */
if (!gpriv->fdmode) {
rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_FDOE);
rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_CLOE);
} else {
rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_FDOE);
rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_CLOE);
}
}
}
return 0;
}


@@ -548,8 +548,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
goto out;
while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
(n < SJA1000_MAX_IRQ)) {
while ((n < SJA1000_MAX_IRQ) &&
(isrc = priv->read_reg(priv, SJA1000_IR))) {
status = priv->read_reg(priv, SJA1000_SR);
/* check for absent controller due to hw unplug */


@@ -657,8 +657,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
u8 isrc, status;
int n = 0;
while ((isrc = readl(priv->base + SUN4I_REG_INT_ADDR)) &&
(n < SUN4I_CAN_MAX_IRQ)) {
while ((n < SUN4I_CAN_MAX_IRQ) &&
(isrc = readl(priv->base + SUN4I_REG_INT_ADDR))) {
n++;
status = readl(priv->base + SUN4I_REG_STA_ADDR);
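In both interrupt-handler hunks above, swapping the operands of && makes the loop-budget check short-circuit first: once n reaches the limit, the interrupt register is not read one extra time. That matters on controllers where the read itself has side effects (such as clearing pending interrupt flags), since the old ordering could consume an event without ever handling it.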


@@ -261,14 +261,21 @@ struct canfd_quirk {
u8 quirk;
} __packed;
struct gs_host_frame {
u32 echo_id;
__le32 can_id;
/* struct gs_host_frame::echo_id == GS_HOST_FRAME_ECHO_ID_RX indicates
* a regular RX'ed CAN frame
*/
#define GS_HOST_FRAME_ECHO_ID_RX 0xffffffff
u8 can_dlc;
u8 channel;
u8 flags;
u8 reserved;
struct gs_host_frame {
struct_group(header,
u32 echo_id;
__le32 can_id;
u8 can_dlc;
u8 channel;
u8 flags;
u8 reserved;
);
union {
DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
@@ -568,6 +575,37 @@ gs_usb_get_echo_skb(struct gs_can *dev, struct sk_buff *skb,
return len;
}
static unsigned int
gs_usb_get_minimum_rx_length(const struct gs_can *dev, const struct gs_host_frame *hf,
unsigned int *data_length_p)
{
unsigned int minimum_length, data_length = 0;
if (hf->flags & GS_CAN_FLAG_FD) {
if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX)
data_length = can_fd_dlc2len(hf->can_dlc);
if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
/* timestamp follows data field of max size */
minimum_length = struct_size(hf, canfd_ts, 1);
else
minimum_length = sizeof(hf->header) + data_length;
} else {
if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX &&
!(hf->can_id & cpu_to_le32(CAN_RTR_FLAG)))
data_length = can_cc_dlc2len(hf->can_dlc);
if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
/* timestamp follows data field of max size */
minimum_length = struct_size(hf, classic_can_ts, 1);
else
minimum_length = sizeof(hf->header) + data_length;
}
*data_length_p = data_length;
return minimum_length;
}
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *parent = urb->context;
@@ -576,6 +614,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
int rc;
struct net_device_stats *stats;
struct gs_host_frame *hf = urb->transfer_buffer;
unsigned int minimum_length, data_length;
struct gs_tx_context *txc;
struct can_frame *cf;
struct canfd_frame *cfd;
@@ -594,6 +633,15 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
}
minimum_length = sizeof(hf->header);
if (urb->actual_length < minimum_length) {
dev_err_ratelimited(&parent->udev->dev,
"short read (actual_length=%u, minimum_length=%u)\n",
urb->actual_length, minimum_length);
goto resubmit_urb;
}
/* device reports out of range channel id */
if (hf->channel >= parent->channel_cnt)
goto device_detach;
@@ -609,20 +657,33 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
if (!netif_running(netdev))
goto resubmit_urb;
if (hf->echo_id == -1) { /* normal rx */
minimum_length = gs_usb_get_minimum_rx_length(dev, hf, &data_length);
if (urb->actual_length < minimum_length) {
stats->rx_errors++;
stats->rx_length_errors++;
if (net_ratelimit())
netdev_err(netdev,
"short read (actual_length=%u, minimum_length=%u)\n",
urb->actual_length, minimum_length);
goto resubmit_urb;
}
if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX) { /* normal rx */
if (hf->flags & GS_CAN_FLAG_FD) {
skb = alloc_canfd_skb(netdev, &cfd);
if (!skb)
return;
cfd->can_id = le32_to_cpu(hf->can_id);
cfd->len = can_fd_dlc2len(hf->can_dlc);
cfd->len = data_length;
if (hf->flags & GS_CAN_FLAG_BRS)
cfd->flags |= CANFD_BRS;
if (hf->flags & GS_CAN_FLAG_ESI)
cfd->flags |= CANFD_ESI;
memcpy(cfd->data, hf->canfd->data, cfd->len);
memcpy(cfd->data, hf->canfd->data, data_length);
} else {
skb = alloc_can_skb(netdev, &cf);
if (!skb)
@@ -631,7 +692,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
cf->can_id = le32_to_cpu(hf->can_id);
can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
memcpy(cf->data, hf->classic_can->data, 8);
memcpy(cf->data, hf->classic_can->data, data_length);
/* ERROR frames tell us information about the controller */
if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
@@ -687,7 +748,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, parent->udev,
parent->pipe_in,
hf, dev->parent->hf_size_rx,
hf, parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -750,8 +811,21 @@ static void gs_usb_xmit_callback(struct urb *urb)
struct gs_can *dev = txc->dev;
struct net_device *netdev = dev->netdev;
if (urb->status)
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
if (!urb->status)
return;
if (urb->status != -ESHUTDOWN && net_ratelimit())
netdev_info(netdev, "failed to xmit URB %u: %pe\n",
txc->echo_id, ERR_PTR(urb->status));
netdev->stats.tx_dropped++;
netdev->stats.tx_errors++;
can_free_echo_skb(netdev, txc->echo_id, NULL);
gs_free_tx_context(txc);
atomic_dec(&dev->active_tx_urbs);
netif_wake_queue(netdev);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,

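The new gs_usb_get_minimum_rx_length() helper computes how many bytes the device must actually have sent for the frame being parsed, so short URBs are now detected and dropped (the URB is resubmitted and counted as an rx_length_error) instead of being read past the end of the received data. As a rough worked example, assuming the packed header above is 12 bytes (two 32-bit words plus four single bytes): a received classic frame with DLC 3 and no hardware timestamping needs only 12 + 3 = 15 bytes; a TX echo (echo_id != GS_HOST_FRAME_ECHO_ID_RX) contributes no data bytes, so 12 bytes suffice; with GS_CAN_FEATURE_HW_TIMESTAMP the full struct_size(hf, classic_can_ts, 1) (or canfd_ts for FD) is required, because the timestamp sits after a maximum-size data field.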

@@ -685,7 +685,7 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
* for further details.
*/
if (tmp->len == 0) {
pos = round_up(pos,
pos = round_up(pos + 1,
le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
@@ -1732,7 +1732,7 @@ static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
* number of events in case of a heavy rx load on the bus.
*/
if (cmd->len == 0) {
pos = round_up(pos, le16_to_cpu
pos = round_up(pos + 1, le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
}


@@ -2587,8 +2587,8 @@ static int ksz_irq_phy_setup(struct ksz_device *dev)
irq = irq_find_mapping(dev->ports[port].pirq.domain,
PORT_SRC_PHY_INT);
if (irq < 0) {
ret = irq;
if (!irq) {
ret = -EINVAL;
goto out;
}
ds->user_mii_bus->irq[phy] = irq;
@@ -2952,8 +2952,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
if (pirq->irq_num < 0)
return pirq->irq_num;
if (!pirq->irq_num)
return -EINVAL;
return ksz_irq_common_setup(dev, pirq);
}
@@ -3038,12 +3038,12 @@ static int ksz_setup(struct dsa_switch *ds)
dsa_switch_for_each_user_port(dp, dev->ds) {
ret = ksz_pirq_setup(dev, dp->index);
if (ret)
goto out_girq;
goto port_release;
if (dev->info->ptp_capable) {
ret = ksz_ptp_irq_setup(ds, dp->index);
if (ret)
goto out_pirq;
goto pirq_release;
}
}
}
@@ -3053,7 +3053,7 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret) {
dev_err(dev->dev, "Failed to register PTP clock: %d\n",
ret);
goto out_ptpirq;
goto port_release;
}
}
@@ -3076,17 +3076,16 @@ static int ksz_setup(struct dsa_switch *ds)
out_ptp_clock_unregister:
if (dev->info->ptp_capable)
ksz_ptp_clock_unregister(ds);
out_ptpirq:
if (dev->irq > 0 && dev->info->ptp_capable)
dsa_switch_for_each_user_port(dp, dev->ds)
ksz_ptp_irq_free(ds, dp->index);
out_pirq:
if (dev->irq > 0)
dsa_switch_for_each_user_port(dp, dev->ds)
port_release:
if (dev->irq > 0) {
dsa_switch_for_each_user_port_continue_reverse(dp, dev->ds) {
if (dev->info->ptp_capable)
ksz_ptp_irq_free(ds, dp->index);
pirq_release:
ksz_irq_free(&dev->ports[dp->index].pirq);
out_girq:
if (dev->irq > 0)
}
ksz_irq_free(&dev->girq);
}
return ret;
}


@@ -1093,19 +1093,19 @@ static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
static const char * const name[] = {"pdresp-msg", "xdreq-msg",
"sync-msg"};
const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
struct ksz_irq *ptpirq = &port->ptpirq;
struct ksz_ptp_irq *ptpmsg_irq;
ptpmsg_irq = &port->ptpmsg_irq[n];
ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
if (!ptpmsg_irq->num)
return -EINVAL;
ptpmsg_irq->port = port;
ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
strscpy(ptpmsg_irq->name, name[n]);
ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
if (ptpmsg_irq->num < 0)
return ptpmsg_irq->num;
return request_threaded_irq(ptpmsg_irq->num, NULL,
ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
ptpmsg_irq->name, ptpmsg_irq);
@@ -1135,12 +1135,9 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
if (!ptpirq->domain)
return -ENOMEM;
for (irq = 0; irq < ptpirq->nirqs; irq++)
irq_create_mapping(ptpirq->domain, irq);
ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
if (ptpirq->irq_num < 0) {
ret = ptpirq->irq_num;
if (!ptpirq->irq_num) {
ret = -EINVAL;
goto out;
}
@@ -1159,12 +1156,11 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
out_ptp_msg:
free_irq(ptpirq->irq_num, ptpirq);
while (irq--)
while (irq--) {
free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
out:
for (irq = 0; irq < ptpirq->nirqs; irq++)
irq_dispose_mapping(port->ptpmsg_irq[irq].num);
}
out:
irq_domain_remove(ptpirq->domain);
return ret;


@@ -1302,14 +1302,7 @@ static int sja1105_set_port_speed(struct sja1105_private *priv, int port,
* table, since this will be used for the clocking setup, and we no
* longer need to store it in the static config (already told hardware
* we want auto during upload phase).
* Actually for the SGMII port, the MAC is fixed at 1 Gbps and
* we need to configure the PCS only (if even that).
*/
if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
mac[port].speed = speed;
return 0;


@@ -15,6 +15,7 @@
#include "aq_hw.h"
#include "aq_nic.h"
#include "hw_atl/hw_atl_llh.h"
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)
@@ -81,6 +82,27 @@ void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
lo_hi_writeq(value, hw->mmio + reg);
}
int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
{
int err;
u32 val;
/* Invalidate Descriptor Cache to prevent writing to the cached
* descriptors and to the data pointer of those descriptors
*/
hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);
err = aq_hw_err_from_flags(hw);
if (err)
goto err_exit;
readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
hw, val, val == 1, 1000U, 10000U);
err_exit:
return err;
}
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;


@@ -35,6 +35,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);


@@ -547,6 +547,11 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;
/* There will be an extra fragment */
if (buff->len > AQ_CFG_RX_HDR_SIZE)
frag_cnt++;
buff_ = buff;
do {
bool is_rsc_completed = true;


@@ -1198,26 +1198,9 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
int err;
u32 val;
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
/* Invalidate Descriptor Cache to prevent writing to the cached
* descriptors and to the data pointer of those descriptors
*/
hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
err = aq_hw_err_from_flags(self);
if (err)
goto err_exit;
readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
self, val, val == 1, 1000U, 10000U);
err_exit:
return err;
return aq_hw_invalidate_descriptor_cache(self);
}
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)


@@ -759,7 +759,7 @@ static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
return 0;
return aq_hw_invalidate_descriptor_cache(self);
}
static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)


@@ -687,6 +687,7 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
bool perout_enable;
struct hrtimer perout_timer;
u64 perout_stime;


@@ -128,6 +128,12 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
spin_lock_irqsave(&fep->tmreg_lock, flags);
if (fep->perout_enable) {
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
dev_err(&fep->pdev->dev, "PEROUT is running");
return -EBUSY;
}
if (fep->pps_enable == enable) {
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;
@@ -243,6 +249,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
* the FEC_TCCR register in time and missed the start time.
*/
if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
fep->perout_enable = false;
dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return -1;
@@ -497,7 +504,10 @@ static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
unsigned long flags;
hrtimer_cancel(&fep->perout_timer);
spin_lock_irqsave(&fep->tmreg_lock, flags);
fep->perout_enable = false;
writel(0, fep->hwp + FEC_TCSR(channel));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
@@ -529,6 +539,8 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return ret;
} else if (rq->type == PTP_CLK_REQ_PEROUT) {
u32 reload_period;
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
@@ -548,12 +560,14 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
fep->reload_period = div_u64(period_ns, 2);
if (on && fep->reload_period) {
reload_period = div_u64(period_ns, 2);
if (on && reload_period) {
u64 perout_stime;
/* Convert 1588 timestamp to ns*/
start_time.tv_sec = rq->perout.start.sec;
start_time.tv_nsec = rq->perout.start.nsec;
fep->perout_stime = timespec64_to_ns(&start_time);
perout_stime = timespec64_to_ns(&start_time);
mutex_lock(&fep->ptp_clk_mutex);
if (!fep->ptp_clk_on) {
@@ -562,18 +576,41 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
spin_lock_irqsave(&fep->tmreg_lock, flags);
if (fep->pps_enable) {
dev_err(&fep->pdev->dev, "PPS is running");
ret = -EBUSY;
goto unlock;
}
if (fep->perout_enable) {
dev_err(&fep->pdev->dev,
"PEROUT has been enabled\n");
ret = -EBUSY;
goto unlock;
}
/* Read current timestamp */
curr_time = timecounter_read(&fep->tc);
if (perout_stime <= curr_time) {
dev_err(&fep->pdev->dev,
"Start time must be greater than current time\n");
ret = -EINVAL;
goto unlock;
}
/* Calculate time difference */
delta = perout_stime - curr_time;
fep->reload_period = reload_period;
fep->perout_stime = perout_stime;
fep->perout_enable = true;
unlock:
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
mutex_unlock(&fep->ptp_clk_mutex);
/* Calculate time difference */
delta = fep->perout_stime - curr_time;
if (fep->perout_stime <= curr_time) {
dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
return -EINVAL;
}
if (ret)
return ret;
/* Because the timer counter of FEC only has 31-bits, correspondingly,
* the time comparison register FEC_TCCR also only low 31 bits can be
@@ -681,8 +718,11 @@ static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
fep->next_counter = (fep->next_counter + fep->reload_period) &
fep->cc.mask;
event.type = PTP_CLOCK_PPS;
ptp_clock_event(fep->ptp_clock, &event);
if (fep->pps_enable) {
event.type = PTP_CLOCK_PPS;
ptp_clock_event(fep->ptp_clock, &event);
}
return IRQ_HANDLED;
}


@@ -627,7 +627,7 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
MLX5E_100MB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
} else if (max_bw_value[i] <= upper_limit_gbps) {
} else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_1GB);
max_bw_unit[i] = MLX5_GBPS_UNIT;


@@ -201,7 +201,7 @@ static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
return -ENODEV;
/* Fill all but 1 unused descriptors in the Rx queue. */
count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
count = (head - tail - 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
while (!err && count--) {
struct fbnic_tlv_msg *msg;


@@ -1,11 +1,14 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/ptp_classify.h>
#include <linux/units.h>
#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#define LAN9X66_CLOCK_RATE 165617754
#define LAN966X_MAX_PTP_ID 512
/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference
@@ -1126,5 +1129,5 @@ void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
u32 lan966x_ptp_get_period_ps(void)
{
/* This represents the system clock period in picoseconds */
return 15125;
return PICO / LAN9X66_CLOCK_RATE;
}
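As a quick sanity check on the new expression (PICO is the picoseconds-per-second constant, 10^12, pulled in via the added <linux/units.h> include): 10^12 / 165617754 is roughly 6038 ps per clock cycle, consistent with the 6.037735849 ns reference period quoted in the PTP adjustment comment above, whereas the previous hard-coded 15125 ps corresponds to a clock of only about 66 MHz.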


@@ -1514,11 +1514,20 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_25 &&
tp->mac_version != RTL_GIGA_MAC_VER_28 &&
tp->mac_version != RTL_GIGA_MAC_VER_31 &&
tp->mac_version != RTL_GIGA_MAC_VER_38)
r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, !enable);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
case RTL_GIGA_MAC_VER_38:
break;
case RTL_GIGA_MAC_VER_80:
r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, true);
break;
default:
r8169_mod_reg8_cond(tp, PMCH, D3HOT_NO_PLL_DOWN, true);
r8169_mod_reg8_cond(tp, PMCH, D3COLD_NO_PLL_DOWN, !enable);
break;
}
}
static void rtl_reset_packet_filter(struct rtl8169_private *tp)


@@ -1521,8 +1521,10 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
skb = priv->rxq[qnum]->rx_skbuff[entry];
if (unlikely(!skb))
if (unlikely(!skb)) {
netdev_err(priv->dev, "rx descriptor is not consistent\n");
break;
}
prefetch(skb->data - NET_IP_ALIGN);
priv->rxq[qnum]->rx_skbuff[entry] = NULL;


@@ -540,7 +540,7 @@ static int gpy_update_interface(struct phy_device *phydev)
/* Interface mode is fixed for USXGMII and integrated PHY */
if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
phydev->interface == PHY_INTERFACE_MODE_INTERNAL)
return -EINVAL;
return 0;
/* Automatically switch SERDES interface between SGMII and 2500-BaseX
* according to speed. Disable ANEG in 2500-BaseX mode.
@@ -578,13 +578,7 @@ static int gpy_update_interface(struct phy_device *phydev)
break;
}
if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000) {
ret = genphy_read_master_slave(phydev);
if (ret < 0)
return ret;
}
return gpy_update_mdix(phydev);
return 0;
}
static int gpy_read_status(struct phy_device *phydev)
@@ -639,6 +633,16 @@ static int gpy_read_status(struct phy_device *phydev)
ret = gpy_update_interface(phydev);
if (ret < 0)
return ret;
if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000) {
ret = genphy_read_master_slave(phydev);
if (ret < 0)
return ret;
}
ret = gpy_update_mdix(phydev);
if (ret < 0)
return ret;
}
return 0;


@@ -1191,10 +1191,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EPERM;
}
err = team_dev_type_check_change(dev, port_dev);
if (err)
return err;
if (port_dev->flags & IFF_UP) {
NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
@@ -1212,10 +1208,16 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
INIT_LIST_HEAD(&port->qom_list);
port->orig.mtu = port_dev->mtu;
err = dev_set_mtu(port_dev, dev->mtu);
if (err) {
netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
goto err_set_mtu;
/*
* MTU assignment will be handled in team_dev_type_check_change
* if dev and port_dev are of different types
*/
if (dev->type == port_dev->type) {
err = dev_set_mtu(port_dev, dev->mtu);
if (err) {
netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
goto err_set_mtu;
}
}
memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
@@ -1290,6 +1292,10 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
}
}
err = team_dev_type_check_change(dev, port_dev);
if (err)
goto err_set_dev_type;
if (dev->flags & IFF_UP) {
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
@@ -1308,6 +1314,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return 0;
err_set_dev_type:
err_set_slave_promisc:
__team_option_inst_del_port(team, port);


@@ -244,7 +244,7 @@ tun_vnet_hdr_tnl_from_skb(unsigned int flags,
if (virtio_net_hdr_tnl_from_skb(skb, tnl_hdr, has_tnl_offload,
tun_vnet_is_little_endian(flags),
vlan_hlen)) {
vlan_hlen, true)) {
struct virtio_net_hdr_v1 *hdr = &tnl_hdr->hash_hdr.hdr;
struct skb_shared_info *sinfo = skb_shinfo(skb);


@@ -975,6 +975,9 @@ static int veth_poll(struct napi_struct *napi, int budget)
if (stats.xdp_redirect > 0)
xdp_do_flush();
if (stats.xdp_tx > 0)
veth_xdp_flush(rq, &bq);
xdp_clear_return_frame_no_direct();
if (done < budget && napi_complete_done(napi, done)) {
/* Write rx_notify_masked before reading ptr_ring */
@@ -987,10 +990,6 @@ static int veth_poll(struct napi_struct *napi, int budget)
}
}
if (stats.xdp_tx > 0)
veth_xdp_flush(rq, &bq);
xdp_clear_return_frame_no_direct();
/* Release backpressure per NAPI poll */
smp_rmb(); /* Paired with netif_tx_stop_queue set_bit */
if (peer_txq && netif_tx_queue_stopped(peer_txq)) {


@@ -3339,7 +3339,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
hdr = &skb_vnet_common_hdr(skb)->tnl_hdr;
if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl,
virtio_is_little_endian(vi->vdev), 0))
virtio_is_little_endian(vi->vdev), 0,
false))
return -EPROTO;
if (vi->mergeable_rx_bufs)


@@ -98,7 +98,7 @@ static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim
static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
{
if (strcmp(cntrl->name, "foxconn-dw5934e") == 0 ||
strcmp(cntrl->name, "foxconn-t99w515") == 0)
strcmp(cntrl->name, "foxconn-t99w640") == 0)
return WDS_BIND_MUX_DATA_PORT_MUX_ID;
return 0;


@@ -592,14 +592,15 @@ static void vhost_net_busy_poll(struct vhost_net *net,
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct vhost_net_virtqueue *tnvq,
unsigned int *out_num, unsigned int *in_num,
struct msghdr *msghdr, bool *busyloop_intr)
struct msghdr *msghdr, bool *busyloop_intr,
unsigned int *ndesc)
{
struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *rvq = &rnvq->vq;
struct vhost_virtqueue *tvq = &tnvq->vq;
int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
out_num, in_num, NULL, NULL);
int r = vhost_get_vq_desc_n(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
out_num, in_num, NULL, NULL, ndesc);
if (r == tvq->num && tvq->busyloop_timeout) {
/* Flush batched packets first */
@@ -610,8 +611,8 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
out_num, in_num, NULL, NULL);
r = vhost_get_vq_desc_n(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
out_num, in_num, NULL, NULL, ndesc);
}
return r;
@@ -642,12 +643,14 @@ static int get_tx_bufs(struct vhost_net *net,
struct vhost_net_virtqueue *nvq,
struct msghdr *msg,
unsigned int *out, unsigned int *in,
size_t *len, bool *busyloop_intr)
size_t *len, bool *busyloop_intr,
unsigned int *ndesc)
{
struct vhost_virtqueue *vq = &nvq->vq;
int ret;
ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg,
busyloop_intr, ndesc);
if (ret < 0 || ret == vq->num)
return ret;
@@ -766,6 +769,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
unsigned int ndesc = 0;
do {
bool busyloop_intr = false;
@@ -774,7 +778,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
vhost_tx_batch(net, nvq, sock, &msg);
head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
&busyloop_intr);
&busyloop_intr, &ndesc);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
@@ -806,7 +810,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
goto done;
} else if (unlikely(err != -ENOSPC)) {
vhost_tx_batch(net, nvq, sock, &msg);
vhost_discard_vq_desc(vq, 1);
vhost_discard_vq_desc(vq, 1, ndesc);
vhost_net_enable_vq(net, vq);
break;
}
@@ -829,7 +833,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
vhost_discard_vq_desc(vq, 1);
vhost_discard_vq_desc(vq, 1, ndesc);
vhost_net_enable_vq(net, vq);
break;
}
@@ -868,6 +872,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
int err;
struct vhost_net_ubuf_ref *ubufs;
struct ubuf_info_msgzc *ubuf;
unsigned int ndesc = 0;
bool zcopy_used;
int sent_pkts = 0;
@@ -879,7 +884,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
busyloop_intr = false;
head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
&busyloop_intr);
&busyloop_intr, &ndesc);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
@@ -941,7 +946,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
}
if (retry) {
vhost_discard_vq_desc(vq, 1);
vhost_discard_vq_desc(vq, 1, ndesc);
vhost_net_enable_vq(net, vq);
break;
}
@@ -1045,11 +1050,12 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
unsigned *iovcount,
struct vhost_log *log,
unsigned *log_num,
unsigned int quota)
unsigned int quota,
unsigned int *ndesc)
{
struct vhost_virtqueue *vq = &nvq->vq;
bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
unsigned int out, in;
unsigned int out, in, desc_num, n = 0;
int seg = 0;
int headcount = 0;
unsigned d;
@@ -1064,9 +1070,9 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
r = -ENOBUFS;
goto err;
}
r = vhost_get_vq_desc(vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
r = vhost_get_vq_desc_n(vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num, &desc_num);
if (unlikely(r < 0))
goto err;
@@ -1093,6 +1099,7 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
++headcount;
datalen -= len;
seg += in;
n += desc_num;
}
*iovcount = seg;
@@ -1113,9 +1120,11 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
nheads[0] = headcount;
}
*ndesc = n;
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
vhost_discard_vq_desc(vq, headcount, n);
return r;
}
@@ -1151,6 +1160,7 @@ static void handle_rx(struct vhost_net *net)
struct iov_iter fixup;
__virtio16 num_buffers;
int recv_pkts = 0;
unsigned int ndesc;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
sock = vhost_vq_get_backend(vq);
@@ -1182,7 +1192,8 @@ static void handle_rx(struct vhost_net *net)
headcount = get_rx_bufs(nvq, vq->heads + count,
vq->nheads + count,
vhost_len, &in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
likely(mergeable) ? UIO_MAXIOV : 1,
&ndesc);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
goto out;
@@ -1228,7 +1239,7 @@ static void handle_rx(struct vhost_net *net)
if (unlikely(err != sock_len)) {
pr_debug("Discarded rx packet: "
" len %d, expected %zd\n", err, sock_len);
vhost_discard_vq_desc(vq, headcount);
vhost_discard_vq_desc(vq, headcount, ndesc);
continue;
}
/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
@@ -1252,7 +1263,7 @@ static void handle_rx(struct vhost_net *net)
copy_to_iter(&num_buffers, sizeof num_buffers,
&fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
vhost_discard_vq_desc(vq, headcount, ndesc);
goto out;
}
nvq->done_idx += headcount;


@@ -2792,18 +2792,34 @@ static int get_indirect(struct vhost_virtqueue *vq,
return 0;
}
/* This looks in the virtqueue and for the first available buffer, and converts
* it to an iovec for convenient access. Since descriptors consist of some
* number of output then some number of input descriptors, it's actually two
* iovecs, but we pack them into one and note how many of each there were.
/**
* vhost_get_vq_desc_n - Fetch the next available descriptor chain and build iovecs
* @vq: target virtqueue
* @iov: array that receives the scatter/gather segments
* @iov_size: capacity of @iov in elements
* @out_num: the number of output segments
* @in_num: the number of input segments
* @log: optional array to record addr/len for each writable segment; NULL if unused
* @log_num: optional output; number of entries written to @log when provided
* @ndesc: optional output; number of descriptors consumed from the available ring
* (useful for rollback via vhost_discard_vq_desc)
*
* This function returns the descriptor number found, or vq->num (which is
* never a valid descriptor number) if none was found. A negative code is
* returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num)
* Extracts one available descriptor chain from @vq and translates guest addresses
* into host iovecs.
*
* On success, advances @vq->last_avail_idx by 1 and @vq->next_avail_head by the
* number of descriptors consumed (also stored via @ndesc when non-NULL).
*
* Return:
* - head index in [0, @vq->num) on success;
* - @vq->num if no descriptor is currently available;
* - negative errno on failure
*/
int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num,
unsigned int *ndesc)
{
bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
struct vring_desc desc;
@@ -2921,17 +2937,49 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
vq->last_avail_idx++;
vq->next_avail_head += c;
if (ndesc)
*ndesc = c;
/* Assume notifications from guest are disabled at this point,
* if they aren't we would need to update avail_event index. */
BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc_n);
/* This looks in the virtqueue and for the first available buffer, and converts
* it to an iovec for convenient access. Since descriptors consist of some
* number of output then some number of input descriptors, it's actually two
* iovecs, but we pack them into one and note how many of each there were.
*
* This function returns the descriptor number found, or vq->num (which is
* never a valid descriptor number) if none was found. A negative code is
* returned on error.
*/
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num)
{
return vhost_get_vq_desc_n(vq, iov, iov_size, out_num, in_num,
log, log_num, NULL);
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
/**
* vhost_discard_vq_desc - Reverse the effect of vhost_get_vq_desc_n()
* @vq: target virtqueue
* @nbufs: number of buffers to roll back
* @ndesc: number of descriptors to roll back
*
* Rewinds the internal consumer cursors after a failed attempt to use buffers
* returned by vhost_get_vq_desc_n().
*/
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int nbufs,
unsigned int ndesc)
{
vq->last_avail_idx -= n;
vq->next_avail_head -= ndesc;
vq->last_avail_idx -= nbufs;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
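Taken together, the kernel-doc above gives the contract the vhost-net changes rely on: vhost_get_vq_desc_n() returns one buffer but also reports, via @ndesc, how many descriptors that buffer consumed from the available ring, and vhost_discard_vq_desc() needs both counts to rewind last_avail_idx and next_avail_head. A minimal caller sketch in the spirit of the handle_tx_copy() hunks earlier in this merge (use_buffer() is a placeholder, error handling trimmed):

	unsigned int out, in, ndesc = 0;
	int head;

	head = vhost_get_vq_desc_n(vq, vq->iov, ARRAY_SIZE(vq->iov),
				   &out, &in, NULL, NULL, &ndesc);
	if (head < 0 || head == vq->num)
		return head;	/* error, or no buffer available right now */

	if (use_buffer(vq, head, out, in) < 0)
		/* give back one buffer worth of ndesc descriptors */
		vhost_discard_vq_desc(vq, 1, ndesc);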


@@ -230,7 +230,15 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num,
unsigned int *ndesc);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int nbuf,
unsigned int ndesc);
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
bool vhost_vq_has_work(struct vhost_virtqueue *vq);


@@ -384,7 +384,8 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr_v1_hash_tunnel *vhdr,
bool tnl_hdr_negotiated,
bool little_endian,
int vlan_hlen)
int vlan_hlen,
bool has_data_valid)
{
struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
unsigned int inner_nh, outer_th;
@@ -394,8 +395,8 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM);
if (!tnl_gso_type)
return virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
vlan_hlen);
return virtio_net_hdr_from_skb(skb, hdr, little_endian,
has_data_valid, vlan_hlen);
/* Tunnel support not negotiated but skb ask for it. */
if (!tnl_hdr_negotiated)
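The new has_data_valid argument is simply forwarded to virtio_net_hdr_from_skb() on the non-tunnel path, so each caller decides whether VIRTIO_NET_HDR_F_DATA_VALID may be advertised. In this series the tun/tap path above passes true, which lets a guest receiving such a frame skip re-validating the checksum (the "avoid unnecessary checksum calculation on guest RX" entry in the pull summary), while virtio-net's own xmit_skb() passes false, since a driver is not supposed to set DATA_VALID on frames it transmits.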


@@ -749,7 +749,6 @@ struct hci_conn {
__u8 remote_cap;
__u8 remote_auth;
__u8 remote_id;
unsigned int sent;
@@ -857,11 +856,12 @@ extern struct mutex hci_cb_list_lock;
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon);
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb,
u16 flags);
#if IS_ENABLED(CONFIG_BT_BREDR)
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
int sco_recv_scodata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb);
#else
static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
__u8 *flags)
@@ -869,23 +869,30 @@ static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
return 0;
}
static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
static inline int sco_recv_scodata(struct hci_dev *hdev, u16 handle,
struct sk_buff *skb)
{
kfree_skb(skb);
return -ENOENT;
}
#endif
#if IS_ENABLED(CONFIG_BT_LE)
int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
int iso_recv(struct hci_dev *hdev, u16 handle, struct sk_buff *skb,
u16 flags);
#else
static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
__u8 *flags)
{
return 0;
}
static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
u16 flags)
static inline int iso_recv(struct hci_dev *hdev, u16 handle,
struct sk_buff *skb, u16 flags)
{
kfree_skb(skb);
return -ENOENT;
}
#endif


@@ -536,6 +536,8 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
case TCF_LAYER_NETWORK:
return skb_network_header(skb);
case TCF_LAYER_TRANSPORT:
if (!skb_transport_header_was_set(skb))
break;
return skb_transport_header(skb);
}
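Because tcf_get_base_ptr() can now return NULL (it falls through to its NULL return when the transport header was never set), every ematch caller has to test the pointer before applying its offset. A minimal sketch of the expected pattern, mirroring the em_cmp/em_nbyte/em_text fixes further down in this merge (example_match() and its arguments are placeholders):

static int example_match(struct sk_buff *skb, int layer, int off, int len)
{
	unsigned char *ptr = tcf_get_base_ptr(skb, layer);

	if (!ptr)	/* e.g. TCF_LAYER_TRANSPORT with no transport header set */
		return 0;

	ptr += off;
	if (!tcf_valid_offset(skb, ptr, len))
		return 0;

	/* ... inspect the bytes at ptr ... */
	return 1;
}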


@@ -881,7 +881,7 @@ out_atmproc_exit:
out_atmsvc_exit:
atmsvc_exit();
out_atmpvc_exit:
atmsvc_exit();
atmpvc_exit();
out_unregister_vcc_proto:
proto_unregister(&vcc_proto);
goto out;


@@ -3832,13 +3832,14 @@ static void hci_tx_work(struct work_struct *work)
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr;
struct hci_conn *conn;
__u16 handle, flags;
int err;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "ACL packet too small");
goto drop;
kfree_skb(skb);
return;
}
handle = __le16_to_cpu(hdr->handle);
@@ -3850,36 +3851,27 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hdev->stat.acl_rx++;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, handle);
hci_dev_unlock(hdev);
if (conn) {
hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
/* Send to upper protocol */
l2cap_recv_acldata(conn, skb, flags);
return;
} else {
err = l2cap_recv_acldata(hdev, handle, skb, flags);
if (err == -ENOENT)
bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
handle);
}
drop:
kfree_skb(skb);
else if (err)
bt_dev_dbg(hdev, "ACL packet recv for handle %d failed: %d",
handle, err);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr;
struct hci_conn *conn;
__u16 handle, flags;
int err;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "SCO packet too small");
goto drop;
kfree_skb(skb);
return;
}
handle = __le16_to_cpu(hdr->handle);
@@ -3891,34 +3883,28 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hdev->stat.sco_rx++;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, handle);
hci_dev_unlock(hdev);
hci_skb_pkt_status(skb) = flags & 0x03;
if (conn) {
/* Send to upper protocol */
hci_skb_pkt_status(skb) = flags & 0x03;
sco_recv_scodata(conn, skb);
return;
} else {
err = sco_recv_scodata(hdev, handle, skb);
if (err == -ENOENT)
bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
handle);
}
drop:
kfree_skb(skb);
else if (err)
bt_dev_dbg(hdev, "SCO packet recv for handle %d failed: %d",
handle, err);
}
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_iso_hdr *hdr;
struct hci_conn *conn;
__u16 handle, flags;
int err;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "ISO packet too small");
goto drop;
kfree_skb(skb);
return;
}
handle = __le16_to_cpu(hdr->handle);
@@ -3928,22 +3914,13 @@ static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
handle, flags);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, handle);
hci_dev_unlock(hdev);
if (!conn) {
err = iso_recv(hdev, handle, skb, flags);
if (err == -ENOENT)
bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
handle);
goto drop;
}
/* Send to upper protocol */
iso_recv(conn, skb, flags);
return;
drop:
kfree_skb(skb);
else if (err)
bt_dev_dbg(hdev, "ISO packet recv for handle %d failed: %d",
handle, err);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
@@ -4121,7 +4098,7 @@ static void hci_rx_work(struct work_struct *work)
}
}
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
static int hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
int err;
@@ -4133,16 +4110,19 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
if (!hdev->sent_cmd) {
skb_queue_head(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
return;
return -EINVAL;
}
if (hci_skb_opcode(skb) != HCI_OP_NOP) {
err = hci_send_frame(hdev, skb);
if (err < 0) {
hci_cmd_sync_cancel_sync(hdev, -err);
return;
return err;
}
atomic_dec(&hdev->cmd_cnt);
} else {
err = -ENODATA;
kfree_skb(skb);
}
if (hdev->req_status == HCI_REQ_PEND &&
@@ -4150,12 +4130,15 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(hdev->req_skb);
hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
}
return err;
}
static void hci_cmd_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
int err;
BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
@@ -4166,7 +4149,9 @@ static void hci_cmd_work(struct work_struct *work)
if (!skb)
return;
hci_send_cmd_sync(hdev, skb);
err = hci_send_cmd_sync(hdev, skb);
if (err)
return;
rcu_read_lock();
if (test_bit(HCI_RESET, &hdev->flags) ||


@@ -1311,7 +1311,9 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
goto done;
}
hci_dev_lock(hdev);
mgmt_index_removed(hdev);
hci_dev_unlock(hdev);
err = hci_dev_open(hdev->id);
if (err) {


@@ -2314,14 +2314,31 @@ static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason)
iso_conn_del(hcon, bt_to_errno(reason));
}
void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
int iso_recv(struct hci_dev *hdev, u16 handle, struct sk_buff *skb, u16 flags)
{
struct iso_conn *conn = hcon->iso_data;
struct hci_conn *hcon;
struct iso_conn *conn;
struct skb_shared_hwtstamps *hwts;
__u16 pb, ts, len, sn;
if (!conn)
goto drop;
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_handle(hdev, handle);
if (!hcon) {
hci_dev_unlock(hdev);
kfree_skb(skb);
return -ENOENT;
}
conn = iso_conn_hold_unless_zero(hcon->iso_data);
hcon = NULL;
hci_dev_unlock(hdev);
if (!conn) {
kfree_skb(skb);
return -EINVAL;
}
pb = hci_iso_flags_pb(flags);
ts = hci_iso_flags_ts(flags);
@@ -2377,7 +2394,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
hci_skb_pkt_status(skb) = flags & 0x03;
hci_skb_pkt_seqnum(skb) = sn;
iso_recv_frame(conn, skb);
return;
goto done;
}
if (pb == ISO_SINGLE) {
@@ -2455,6 +2472,9 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
drop:
kfree_skb(skb);
done:
iso_conn_put(conn);
return 0;
}
static struct hci_cb iso_cb = {


@@ -7510,13 +7510,24 @@ struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
return c;
}
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
struct sk_buff *skb, u16 flags)
{
struct hci_conn *hcon;
struct l2cap_conn *conn;
int len;
/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
hci_dev_lock(hcon->hdev);
/* Lock hdev for hci_conn, and race on l2cap_data vs. l2cap_conn_del */
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_handle(hdev, handle);
if (!hcon) {
hci_dev_unlock(hdev);
kfree_skb(skb);
return -ENOENT;
}
hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);
conn = hcon->l2cap_data;
@@ -7524,12 +7535,13 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
conn = l2cap_conn_add(hcon);
conn = l2cap_conn_hold_unless_zero(conn);
hcon = NULL;
hci_dev_unlock(hcon->hdev);
hci_dev_unlock(hdev);
if (!conn) {
kfree_skb(skb);
return;
return -EINVAL;
}
BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
@@ -7643,6 +7655,7 @@ drop:
unlock:
mutex_unlock(&conn->lock);
l2cap_conn_put(conn);
return 0;
}
static struct hci_cb l2cap_cb = {


@@ -1458,22 +1458,39 @@ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
sco_conn_del(hcon, bt_to_errno(reason));
}
void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
int sco_recv_scodata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
struct hci_conn *hcon;
struct sco_conn *conn;
if (!conn)
goto drop;
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_handle(hdev, handle);
if (!hcon) {
hci_dev_unlock(hdev);
kfree_skb(skb);
return -ENOENT;
}
conn = sco_conn_hold_unless_zero(hcon->sco_data);
hcon = NULL;
hci_dev_unlock(hdev);
if (!conn) {
kfree_skb(skb);
return -EINVAL;
}
BT_DBG("conn %p len %u", conn, skb->len);
if (skb->len) {
if (skb->len)
sco_recv_frame(conn, skb);
return;
}
else
kfree_skb(skb);
drop:
kfree_skb(skb);
sco_conn_put(conn);
return 0;
}
static struct hci_cb sco_cb = {


@@ -2136,7 +2136,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_chan *smp = chan->data;
struct hci_conn *hcon = conn->hcon;
u8 *pkax, *pkbx, *na, *nb, confirm_hint;
u32 passkey;
u32 passkey = 0;
int err;
bt_dev_dbg(hcon->hdev, "conn %p", conn);
@@ -2188,24 +2188,6 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
/* Only Just-Works pairing requires extra checks */
if (smp->method != JUST_WORKS)
goto mackey_and_ltk;
/* If there already exists long term key in local host, leave
* the decision to user space since the remote device could
* be legitimate or malicious.
*/
if (hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
hcon->role)) {
/* Set passkey to 0. The value can be any number since
* it'll be ignored anyway.
*/
passkey = 0;
confirm_hint = 1;
goto confirm;
}
}
mackey_and_ltk:
@@ -2226,11 +2208,12 @@ mackey_and_ltk:
if (err)
return SMP_UNSPECIFIED;
confirm_hint = 0;
confirm:
if (smp->method == JUST_WORKS)
confirm_hint = 1;
/* Always require user confirmation for Just-Works pairing to prevent
* impersonation attacks, or in case of a legitimate device that is
* repairing use the confirmation as acknowledgment to proceed with the
* creation of new keys.
*/
confirm_hint = smp->method == JUST_WORKS ? 1 : 0;
err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type,
hcon->dst_type, passkey, confirm_hint);
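The net effect of this smp.c change matches the "fix not generating MacKey and LTK when re-pairing" summary entry: the removed block jumped straight to the user-confirmation request whenever an LTK for the peer already existed, skipping the mackey_and_ltk derivation, so a legitimate device that was re-pairing never got fresh keys. Now the MacKey/LTK derivation always runs, and Just-Works pairing simply keeps confirm_hint = 1 so user space still has to acknowledge the request.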


@@ -623,6 +623,7 @@ static int mctp_dst_output(struct mctp_dst *dst, struct sk_buff *skb)
skb->protocol = htons(ETH_P_MCTP);
skb->pkt_type = PACKET_OUTGOING;
skb->dev = dst->dev->dev;
if (skb->len > dst->mtu) {
kfree_skb(skb);


@@ -2665,7 +2665,7 @@ static void __mptcp_retrans(struct sock *sk)
}
if (!mptcp_send_head(sk))
return;
goto clear_scheduled;
goto reset_timer;
}
@@ -2696,7 +2696,7 @@ static void __mptcp_retrans(struct sock *sk)
if (__mptcp_check_fallback(msk)) {
spin_unlock_bh(&msk->fallback_lock);
release_sock(ssk);
return;
goto clear_scheduled;
}
while (info.sent < info.limit) {
@@ -2728,6 +2728,15 @@ reset_timer:
if (!mptcp_rtx_timer_pending(sk))
mptcp_reset_rtx_timer(sk);
clear_scheduled:
/* If no rtx data was available or in case of fallback, there
* could be left-over scheduled subflows; clear them all
* or later xmit could use bad ones
*/
mptcp_for_each_subflow(msk, subflow)
if (READ_ONCE(subflow->scheduled))
mptcp_subflow_set_scheduled(subflow, false);
}
/* schedule the timeout timer for the relevant event: either close timeout
@@ -2789,6 +2798,12 @@ static void mptcp_do_fastclose(struct sock *sk)
goto unlock;
subflow->send_fastclose = 1;
/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
* issue in __tcp_select_window(), see tcp_disconnect().
*/
inet_csk(ssk)->icsk_ack.rcv_mss = TCP_MIN_MSS;
tcp_send_active_reset(ssk, ssk->sk_allocation,
SK_RST_REASON_TCP_ABORT_ON_CLOSE);
unlock:


@@ -99,6 +99,9 @@ static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
int i;
const struct can_filter *lp;
if (!pskb_may_pull(skb, CAN_MTU))
return 0;
can_id = em_canid_get_id(skb);
if (can_id & CAN_EFF_FLAG) {


@@ -22,9 +22,12 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
struct tcf_em_cmp *cmp = (struct tcf_em_cmp *) em->data;
unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer) + cmp->off;
unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer);
u32 val = 0;
if (!ptr)
return 0;
ptr += cmp->off;
if (!tcf_valid_offset(skb, ptr, cmp->align))
return 0;


@@ -42,6 +42,8 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
struct nbyte_data *nbyte = (struct nbyte_data *) em->data;
unsigned char *ptr = tcf_get_base_ptr(skb, nbyte->hdr.layer);
if (!ptr)
return 0;
ptr += nbyte->hdr.off;
if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))


@@ -29,12 +29,19 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
struct tcf_pkt_info *info)
{
struct text_match *tm = EM_TEXT_PRIV(m);
unsigned char *ptr;
int from, to;
from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
ptr = tcf_get_base_ptr(skb, tm->from_layer);
if (!ptr)
return 0;
from = ptr - skb->data;
from += tm->from_offset;
to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
ptr = tcf_get_base_ptr(skb, tm->to_layer);
if (!ptr)
return 0;
to = ptr - skb->data;
to += tm->to_offset;
return skb_find_text(skb, from, to, tm->config) != UINT_MAX;


@@ -36,20 +36,13 @@
#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET 32
struct xsk_addr_node {
u64 addr;
struct list_head addr_node;
};
struct xsk_addr_head {
struct xsk_addrs {
u32 num_descs;
struct list_head addrs_list;
u64 addrs[MAX_SKB_FRAGS + 1];
};
static struct kmem_cache *xsk_tx_generic_cache;
#define XSKCB(skb) ((struct xsk_addr_head *)((skb)->cb))
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -558,29 +551,68 @@ static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
return ret;
}
static bool xsk_skb_destructor_is_addr(struct sk_buff *skb)
{
return (uintptr_t)skb_shinfo(skb)->destructor_arg & 0x1UL;
}
static u64 xsk_skb_destructor_get_addr(struct sk_buff *skb)
{
return (u64)((uintptr_t)skb_shinfo(skb)->destructor_arg & ~0x1UL);
}
static void xsk_skb_destructor_set_addr(struct sk_buff *skb, u64 addr)
{
skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t)addr | 0x1UL);
}
static void xsk_inc_num_desc(struct sk_buff *skb)
{
struct xsk_addrs *xsk_addr;
if (!xsk_skb_destructor_is_addr(skb)) {
xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
xsk_addr->num_descs++;
}
}
static u32 xsk_get_num_desc(struct sk_buff *skb)
{
struct xsk_addrs *xsk_addr;
if (xsk_skb_destructor_is_addr(skb))
return 1;
xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
return xsk_addr->num_descs;
}
static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
struct sk_buff *skb)
{
struct xsk_addr_node *pos, *tmp;
u32 num_descs = xsk_get_num_desc(skb);
struct xsk_addrs *xsk_addr;
u32 descs_processed = 0;
unsigned long flags;
u32 idx;
u32 idx, i;
spin_lock_irqsave(&pool->cq_lock, flags);
idx = xskq_get_prod(pool->cq);
xskq_prod_write_addr(pool->cq, idx,
(u64)(uintptr_t)skb_shinfo(skb)->destructor_arg);
descs_processed++;
if (unlikely(num_descs > 1)) {
xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
if (unlikely(XSKCB(skb)->num_descs > 1)) {
list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
for (i = 0; i < num_descs; i++) {
xskq_prod_write_addr(pool->cq, idx + descs_processed,
pos->addr);
xsk_addr->addrs[i]);
descs_processed++;
list_del(&pos->addr_node);
kmem_cache_free(xsk_tx_generic_cache, pos);
}
kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
} else {
xskq_prod_write_addr(pool->cq, idx,
xsk_skb_destructor_get_addr(skb));
descs_processed++;
}
xskq_prod_submit_n(pool->cq, descs_processed);
spin_unlock_irqrestore(&pool->cq_lock, flags);
@@ -595,16 +627,6 @@ static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
spin_unlock_irqrestore(&pool->cq_lock, flags);
}
static void xsk_inc_num_desc(struct sk_buff *skb)
{
XSKCB(skb)->num_descs++;
}
static u32 xsk_get_num_desc(struct sk_buff *skb)
{
return XSKCB(skb)->num_descs;
}
static void xsk_destruct_skb(struct sk_buff *skb)
{
struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
@@ -621,27 +643,22 @@ static void xsk_destruct_skb(struct sk_buff *skb)
static void xsk_skb_init_misc(struct sk_buff *skb, struct xdp_sock *xs,
u64 addr)
{
BUILD_BUG_ON(sizeof(struct xsk_addr_head) > sizeof(skb->cb));
INIT_LIST_HEAD(&XSKCB(skb)->addrs_list);
skb->dev = xs->dev;
skb->priority = READ_ONCE(xs->sk.sk_priority);
skb->mark = READ_ONCE(xs->sk.sk_mark);
XSKCB(skb)->num_descs = 0;
skb->destructor = xsk_destruct_skb;
skb_shinfo(skb)->destructor_arg = (void *)(uintptr_t)addr;
xsk_skb_destructor_set_addr(skb, addr);
}
static void xsk_consume_skb(struct sk_buff *skb)
{
struct xdp_sock *xs = xdp_sk(skb->sk);
u32 num_descs = xsk_get_num_desc(skb);
struct xsk_addr_node *pos, *tmp;
struct xsk_addrs *xsk_addr;
if (unlikely(num_descs > 1)) {
list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
list_del(&pos->addr_node);
kmem_cache_free(xsk_tx_generic_cache, pos);
}
xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
}
skb->destructor = sock_wfree;
@@ -701,7 +718,6 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
{
struct xsk_buff_pool *pool = xs->pool;
u32 hr, len, ts, offset, copy, copied;
struct xsk_addr_node *xsk_addr;
struct sk_buff *skb = xs->skb;
struct page *page;
void *buffer;
@@ -727,16 +743,26 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
return ERR_PTR(err);
}
} else {
xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
if (!xsk_addr)
return ERR_PTR(-ENOMEM);
struct xsk_addrs *xsk_addr;
if (xsk_skb_destructor_is_addr(skb)) {
xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
GFP_KERNEL);
if (!xsk_addr)
return ERR_PTR(-ENOMEM);
xsk_addr->num_descs = 1;
xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
} else {
xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
}
/* in case of -EOVERFLOW that could happen below,
* xsk_consume_skb() will release this node as whole skb
* would be dropped, which implies freeing all list elements
*/
xsk_addr->addr = desc->addr;
list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
}
len = desc->len;
@@ -813,10 +839,25 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
}
} else {
int nr_frags = skb_shinfo(skb)->nr_frags;
struct xsk_addr_node *xsk_addr;
struct xsk_addrs *xsk_addr;
struct page *page;
u8 *vaddr;
if (xsk_skb_destructor_is_addr(skb)) {
xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
GFP_KERNEL);
if (!xsk_addr) {
err = -ENOMEM;
goto free_err;
}
xsk_addr->num_descs = 1;
xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
} else {
xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
}
if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
err = -EOVERFLOW;
goto free_err;
@@ -828,13 +869,6 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
goto free_err;
}
xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
if (!xsk_addr) {
__free_page(page);
err = -ENOMEM;
goto free_err;
}
vaddr = kmap_local_page(page);
memcpy(vaddr, buffer, len);
kunmap_local(vaddr);
@@ -842,8 +876,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
xsk_addr->addr = desc->addr;
list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
}
}
@@ -1904,7 +1937,7 @@ static int __init xsk_init(void)
goto out_pernet;
xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
sizeof(struct xsk_addr_node),
sizeof(struct xsk_addrs),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!xsk_tx_generic_cache) {
err = -ENOMEM;