+
static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
struct xdp_buff *xdp, int *status,
- struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx,
+ bool zc)
{
unsigned int length;
- unsigned int sync;
u32 act;
length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
act = bpf_prog_run_xdp(prog, xdp);
-
- /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
- sync = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
- sync = max(sync, length);
-
switch (act) {
case XDP_PASS:
return false;
@@ -1027,8 +1149,21 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
trace_xdp_exception(rx->adapter->netdev, prog, act);
fallthrough;
case XDP_DROP:
- page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
- sync, true);
+ if (zc) {
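+ /* XSK buffer is returned to its pool; no DMA sync for_device
+ * is needed in this case
+ */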
+ xsk_buff_free(xdp);
+ } else {
+ unsigned int sync;
+
+ /* Due to xdp_adjust_tail: DMA sync for_device must cover
+ * the max length the CPU has touched
+ */
+ sync = xdp->data_end - xdp->data_hard_start -
+ XDP_PACKET_HEADROOM;
+ sync = max(sync, length);
+ page_pool_put_page(rx->page_pool,
+ virt_to_head_page(xdp->data), sync,
+ true);
+ }
return true;
}
}
@@ -1181,7 +1316,8 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
length, false);
consume = tsnep_xdp_run_prog(rx, prog, &xdp,
- &xdp_status, tx_nq, tx);
+ &xdp_status, tx_nq, tx,
+ false);
if (consume) {
rx->packets++;
rx->bytes += length;
@@ -1205,6 +1341,125 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
return done;
}
+static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct tsnep_rx_entry *entry;
+ struct netdev_queue *tx_nq;
+ struct bpf_prog *prog;
+ struct tsnep_tx *tx;
+ int desc_available;
+ int xdp_status = 0;
+ struct page *page;
+ int done = 0;
+ int length;
+
+ desc_available = tsnep_rx_desc_available(rx);
+ prog = READ_ONCE(rx->adapter->xdp_prog);
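+ /* XDP_TX transmits on the TX queue paired with this RX queue */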
+ if (prog) {
+ tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
+ rx->tx_queue_index);
+ tx = &rx->adapter->tx[rx->tx_queue_index];
+ }
+
+ while (likely(done < budget) && (rx->read != rx->write)) {
+ entry = &rx->entry[rx->read];
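+ /* stop if the descriptor has not been written back by the
+ * device yet
+ */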
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+ done++;
+
+ if (desc_available >= TSNEP_RING_RX_REFILL) {
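+ /* refill in batches; allow reuse of the buffer currently
+ * being processed only if the ring would run empty otherwise
+ */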
+ bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
+
+ desc_available -= tsnep_rx_refill_zc(rx, desc_available,
+ reuse);
+ if (!entry->xdp) {
+ /* buffer has been reused for refill to prevent
+ * empty RX ring, thus buffer cannot be used for
+ * RX processing
+ */
+ rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+ desc_available++;
+
+ rx->dropped++;
+
+ continue;
+ }
+ }
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ prefetch(entry->xdp->data);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ entry->xdp->data_end = entry->xdp->data + length;
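+ /* make the received frame visible to the CPU */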
+ xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
+
+ /* RX metadata with timestamps is in front of actual data,
+ * subtract metadata size to get length of actual data and
+ * consider metadata size as offset of actual data during RX
+ * processing
+ */
+ length -= TSNEP_RX_INLINE_METADATA_SIZE;
+
+ rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+ desc_available++;
+
+ if (prog) {
+ bool consume;
+
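+ /* hide inline metadata from the XDP program */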
+ entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE;
+ entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE;
+
+ consume = tsnep_xdp_run_prog(rx, prog, entry->xdp,
+ &xdp_status, tx_nq, tx,
+ true);