hi all, this patch introduces on 8139cp the facilities defined in include/asm/pci.h and explained in Documentation/DMA-mapping.txt to save space in those archs where pci_unmap_single/page is a nop. I tested it only on x86 and it seems to work fine; if there is any trouble with other archs, let me know. regards. Michele 'mydecay' Marchetto S.P.I.N.E. Group - www.spine-group.org --- 8139cp.c.old 2003-09-23 14:47:53.000000000 +0200 +++ 8139cp.c 2003-09-23 16:40:01.036420920 +0200 @@ -304,12 +304,12 @@ struct cp_desc { u32 opts1; u32 opts2; - u64 addr; +// u64 addr; }; struct ring_info { struct sk_buff *skb; - dma_addr_t mapping; + DECLARE_PCI_UNMAP_ADDR(mapping) unsigned frag; }; @@ -521,7 +521,7 @@ while (1) { u32 status, len; - dma_addr_t mapping; +// dma_addr_t mapping; struct sk_buff *skb, *new_skb; struct cp_desc *desc; unsigned buflen; @@ -536,7 +536,7 @@ break; len = (status & 0x1fff) - 4; - mapping = cp->rx_skb[rx_tail].mapping; +// mapping = cp->rx_skb[rx_tail].mapping; if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) { /* we don't support incoming fragmented frames. @@ -569,7 +569,8 @@ skb_reserve(new_skb, RX_OFFSET); new_skb->dev = cp->dev; - pci_unmap_single(cp->pdev, mapping, + pci_unmap_single(cp->pdev, + pci_unmap_addr(cp, rx_skb[rx_tail].mapping), buflen, PCI_DMA_FROMDEVICE); /* Handle checksum offloading for incoming packets. 
*/ @@ -580,10 +581,9 @@ skb_put(skb, len); - mapping = - cp->rx_skb[rx_tail].mapping = + pci_unmap_addr_set(cp, rx_skb[rx_tail].mapping, pci_map_single(cp->pdev, new_skb->tail, - buflen, PCI_DMA_FROMDEVICE); + buflen, PCI_DMA_FROMDEVICE)); cp->rx_skb[rx_tail].skb = new_skb; cp_rx_skb(cp, skb, desc); @@ -591,7 +591,8 @@ rx_next: cp->rx_ring[rx_tail].opts2 = 0; - cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); +/* pci_unmap_addr_set(cp, rx_ring[rx_tail].addr, + cpu_to_le64(rx_skb[rx_tail].mapping)); */ if (rx_tail == (CP_RX_RING_SIZE - 1)) desc->opts1 = cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); @@ -687,8 +688,10 @@ if (!skb) BUG(); - pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping, - skb->len, PCI_DMA_TODEVICE); + pci_unmap_single(cp->pdev, + pci_unmap_addr(cp, tx_skb[tx_tail].mapping), + skb->len, + PCI_DMA_TODEVICE); if (status & LastFrag) { if (status & (TxError | TxFIFOUnder)) { @@ -761,7 +764,7 @@ len = skb->len; mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE); CP_VLAN_TX_TAG(txd, vlan_tag); - txd->addr = cpu_to_le64(mapping); +// txd->addr = cpu_to_le64(mapping); wmb(); if (skb->ip_summed == CHECKSUM_HW) { @@ -782,7 +785,7 @@ wmb(); cp->tx_skb[entry].skb = skb; - cp->tx_skb[entry].mapping = mapping; + pci_unmap_addr_set(cp, tx_skb[entry].mapping, mapping); cp->tx_skb[entry].frag = 0; entry = NEXT_TX(entry); } else { @@ -800,7 +803,7 @@ first_mapping = pci_map_single(cp->pdev, skb->data, first_len, PCI_DMA_TODEVICE); cp->tx_skb[entry].skb = skb; - cp->tx_skb[entry].mapping = first_mapping; + pci_unmap_addr_set(cp, tx_skb[entry].mapping, first_mapping); cp->tx_skb[entry].frag = 1; entry = NEXT_TX(entry); @@ -833,21 +836,21 @@ txd = &cp->tx_ring[entry]; CP_VLAN_TX_TAG(txd, vlan_tag); - txd->addr = cpu_to_le64(mapping); +// txd->addr = cpu_to_le64(mapping); wmb(); txd->opts1 = cpu_to_le32(ctrl); wmb(); cp->tx_skb[entry].skb = skb; - cp->tx_skb[entry].mapping = mapping; + pci_unmap_addr_set(cp, tx_skb[entry].mapping, mapping); 
cp->tx_skb[entry].frag = frag + 2; entry = NEXT_TX(entry); } txd = &cp->tx_ring[first_entry]; CP_VLAN_TX_TAG(txd, vlan_tag); - txd->addr = cpu_to_le64(first_mapping); +// txd->addr = cpu_to_le64(first_mapping); wmb(); if (skb->ip_summed == CHECKSUM_HW) { @@ -1055,13 +1058,18 @@ skb->dev = cp->dev; skb_reserve(skb, RX_OFFSET); - cp->rx_skb[i].mapping = pci_map_single(cp->pdev, - skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE); + pci_unmap_addr_set(cp, rx_skb[i].mapping, + pci_map_single(cp->pdev, + skb->tail, + cp->rx_buf_sz, + PCI_DMA_FROMDEVICE)); + cp->rx_skb[i].skb = skb; cp->rx_skb[i].frag = 0; cp->rx_ring[i].opts2 = 0; - cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping); +/* pci_unmap_addr_set(cp, rx_ring[i].addr, + cpu_to_le64(cp->rx_skb[i].mapping)); */ if (i == (CP_RX_RING_SIZE - 1)) cp->rx_ring[i].opts1 = cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); @@ -1115,8 +1123,11 @@ for (i = 0; i < CP_RX_RING_SIZE; i++) { if (cp->rx_skb[i].skb) { - pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping, - cp->rx_buf_sz, PCI_DMA_FROMDEVICE); + pci_unmap_single(cp->pdev, + pci_unmap_addr(cp, rx_skb[i].mapping), + cp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(cp->rx_skb[i].skb); } } @@ -1124,8 +1135,11 @@ for (i = 0; i < CP_TX_RING_SIZE; i++) { if (cp->tx_skb[i].skb) { struct sk_buff *skb = cp->tx_skb[i].skb; - pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping, - skb->len, PCI_DMA_TODEVICE); + pci_unmap_single(cp->pdev, + pci_unmap_addr(cp, tx_skb[i].mapping), + skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb(skb); cp->net_stats.tx_dropped++; } - To unsubscribe from this list: send the line "unsubscribe linux-net" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html