[patch 7/9] s390: qeth tcp segmentation offload.

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



[patch 7/9] s390: qeth tcp segmentation offload.

From: Frank Pavlic <pavlic@xxxxxxxxxx>

Add support for TCP Segmentation Offload to the qeth network driver.

Signed-off-by: Martin Schwidefsky <schwidefsky@xxxxxxxxxx>

diffstat:
 drivers/s390/net/Makefile    |    2 
 drivers/s390/net/qeth.h      |   94 +++++-
 drivers/s390/net/qeth_eddp.c |  643 +++++++++++++++++++++++++++++++++++++++++++
 drivers/s390/net/qeth_eddp.h |   85 +++++
 drivers/s390/net/qeth_main.c |  548 +++++++++++++++++++++++++++---------
 drivers/s390/net/qeth_mpc.h  |    5 
 drivers/s390/net/qeth_proc.c |   11 
 drivers/s390/net/qeth_sys.c  |   56 +++
 drivers/s390/net/qeth_tso.c  |  285 +++++++++++++++++++
 drivers/s390/net/qeth_tso.h  |   58 +++
 10 files changed, 1639 insertions(+), 148 deletions(-)

diff -urpN linux-2.6/drivers/s390/net/Makefile linux-2.6-patched/drivers/s390/net/Makefile
--- linux-2.6/drivers/s390/net/Makefile	2005-03-02 08:38:37.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/Makefile	2005-03-24 14:03:05.000000000 +0100
@@ -9,6 +9,6 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
 obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
 obj-$(CONFIG_LCS) += lcs.o cu3088.o
-qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o
+qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o qeth_tso.o
 qeth-$(CONFIG_PROC_FS) += qeth_proc.o
 obj-$(CONFIG_QETH) += qeth.o
diff -urpN linux-2.6/drivers/s390/net/qeth_eddp.c linux-2.6-patched/drivers/s390/net/qeth_eddp.c
--- linux-2.6/drivers/s390/net/qeth_eddp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/qeth_eddp.c	2005-03-24 14:03:05.000000000 +0100
@@ -0,0 +1,643 @@
+/*
+ *
+ * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.11 $)
+ *
+ * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
+ *
+ * Copyright 2004 IBM Corporation
+ *
+ *    Author(s): Thomas Spatzier <tspat@xxxxxxxxxx>
+ *
+ *    $Revision: 1.11 $	 $Date: 2005/03/24 09:04:18 $
+ *
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
+#include <linux/kernel.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+#include <linux/skbuff.h>
+
+#include <net/ip.h>
+
+#include "qeth.h"
+#include "qeth_mpc.h"
+#include "qeth_eddp.h"
+
+int
+qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
+				    struct qeth_eddp_context *ctx)
+{
+	int index = queue->next_buf_to_fill;
+	int elements_needed = ctx->num_elements;
+	int elements_in_buffer;
+	int skbs_in_buffer;
+	int buffers_needed = 0;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
+	while(elements_needed > 0) {
+		buffers_needed++;
+		if (atomic_read(&queue->bufs[index].state) !=
+				QETH_QDIO_BUF_EMPTY)
+			return -EBUSY;
+
+		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
+				     queue->bufs[index].next_element_to_fill;
+		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
+		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
+		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
+	}
+	return buffers_needed;
+}
+
+static inline void
+qeth_eddp_free_context(struct qeth_eddp_context *ctx)
+{
+	int i;
+
+	QETH_DBF_TEXT(trace, 5, "eddpfctx");
+	for (i = 0; i < ctx->num_pages; ++i)
+		free_page((unsigned long)ctx->pages[i]);
+	kfree(ctx->pages);
+	if (ctx->elements != NULL)
+		kfree(ctx->elements);
+	kfree(ctx);
+}
+
+
+static inline void
+qeth_eddp_get_context(struct qeth_eddp_context *ctx)
+{
+	atomic_inc(&ctx->refcnt);
+}
+
+void
+qeth_eddp_put_context(struct qeth_eddp_context *ctx)
+{
+	if (atomic_dec_return(&ctx->refcnt) == 0)
+		qeth_eddp_free_context(ctx);
+}
+
+void
+qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
+{
+	struct qeth_eddp_context_reference *ref;
+
+	QETH_DBF_TEXT(trace, 6, "eddprctx");
+	while (!list_empty(&buf->ctx_list)){
+		ref = list_entry(buf->ctx_list.next,
+				 struct qeth_eddp_context_reference, list);
+		qeth_eddp_put_context(ref->ctx);
+		list_del(&ref->list);
+		kfree(ref);
+	}
+}
+
+static inline int
+qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
+			  struct qeth_eddp_context *ctx)
+{
+	struct qeth_eddp_context_reference *ref;
+
+	QETH_DBF_TEXT(trace, 6, "eddprfcx");
+	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
+	if (ref == NULL)
+		return -ENOMEM;
+	qeth_eddp_get_context(ctx);
+	ref->ctx = ctx;
+	list_add_tail(&ref->list, &buf->ctx_list);
+	return 0;
+}
+
+int
+qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
+		      struct qeth_eddp_context *ctx,
+		      int index)
+{
+	struct qeth_qdio_out_buffer *buf = NULL;
+	struct qdio_buffer *buffer;
+	int elements = ctx->num_elements;
+	int element = 0;
+	int flush_cnt = 0;
+	int must_refcnt = 1;
+	int i;
+
+	QETH_DBF_TEXT(trace, 5, "eddpfibu");
+	while (elements > 0) {
+		buf = &queue->bufs[index];
+		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY){
+			/* normally this should not happen since we checked for
+			 * available elements in qeth_eddp_check_buffers_for_context
+			 */
+			if (element == 0)
+				return -EBUSY;
+			else {
+				PRINT_WARN("could only partially fill eddp "
+					   "buffer!\n");
+				goto out;
+			}
+		}
+		/* check if the whole next skb fits into current buffer */
+		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
+					buf->next_element_to_fill)
+				< ctx->elements_per_skb){
+			/* no -> go to next buffer */
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
+			flush_cnt++;
+			/* new buffer, so we have to add ctx to buffer's ctx_list
+			 * and increment ctx's refcnt */
+			must_refcnt = 1;
+			continue;
+		}
+		if (must_refcnt){
+			must_refcnt = 0;
+			if (qeth_eddp_buf_ref_context(buf, ctx)){
+				PRINT_WARN("no memory to create eddp context "
+					   "reference\n");
+				goto out_check;
+			}
+		}
+		buffer = buf->buffer;
+		/* fill one skb into buffer */
+		for (i = 0; i < ctx->elements_per_skb; ++i){
+			buffer->element[buf->next_element_to_fill].addr =
+				ctx->elements[element].addr;
+			buffer->element[buf->next_element_to_fill].length =
+				ctx->elements[element].length;
+			buffer->element[buf->next_element_to_fill].flags =
+				ctx->elements[element].flags;
+			buf->next_element_to_fill++;
+			element++;
+			elements--;
+		}
+	}
+out_check:
+	if (!queue->do_pack) {
+		QETH_DBF_TEXT(trace, 6, "fillbfnp");
+		/* set state to PRIMED -> will be flushed */
+		if (buf->next_element_to_fill > 0){
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			flush_cnt++;
+		}
+	} else {
+#ifdef CONFIG_QETH_PERF_STATS
+		queue->card->perf_stats.skbs_sent_pack++;
+#endif
+		QETH_DBF_TEXT(trace, 6, "fillbfpa");
+		if (buf->next_element_to_fill >=
+				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
+			/*
+			 * packed buffer is full -> set state PRIMED
+			 * -> will be flushed
+			 */
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			flush_cnt++;
+		}
+	}
+out:
+	return flush_cnt;
+}
+
+static inline int
+qeth_get_skb_data_len(struct sk_buff *skb)
+{
+	int len = skb->len;
+	int i;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
+		len -= skb_shinfo(skb)->frags[i].size;
+	return len;
+}
+
+static inline void
+qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
+			      struct qeth_eddp_data *eddp)
+{
+	u8 *page;
+	int page_remainder;
+	int page_offset;
+	int hdr_len;
+	struct qeth_eddp_element *element;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
+	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
+	page_offset = ctx->offset % PAGE_SIZE;
+	element = &ctx->elements[ctx->num_elements];
+	hdr_len = eddp->nhl + eddp->thl;
+	/* FIXME: layer2 and VLAN !!! */
+	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
+		hdr_len += ETH_HLEN;
+	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
+		hdr_len += VLAN_HLEN;
+	/* does complete header fit in current page ? */
+	page_remainder = PAGE_SIZE - page_offset;
+	if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){
+		/* no -> go to start of next page */
+		ctx->offset += page_remainder;
+		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
+		page_offset = 0;
+	}
+	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
+	element->addr = page + page_offset;
+	element->length = sizeof(struct qeth_hdr);
+	ctx->offset += sizeof(struct qeth_hdr);
+	page_offset += sizeof(struct qeth_hdr);
+	/* add mac header (?) */
+	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
+		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
+		element->length += ETH_HLEN;
+		ctx->offset += ETH_HLEN;
+		page_offset += ETH_HLEN;
+	}
+	/* add VLAN tag */
+	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)){
+		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
+		element->length += VLAN_HLEN;
+		ctx->offset += VLAN_HLEN;
+		page_offset += VLAN_HLEN;
+	}
+	/* add network header */
+	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
+	element->length += eddp->nhl;
+	eddp->nh_in_ctx = page + page_offset;
+	ctx->offset += eddp->nhl;
+	page_offset += eddp->nhl;
+	/* add transport header */
+	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
+	element->length += eddp->thl;
+	eddp->th_in_ctx = page + page_offset;
+	ctx->offset += eddp->thl;
+}
+
+static inline void
+qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
+			u32 *hcsum)
+{
+	struct skb_frag_struct *frag;
+	int left_in_frag;
+	int copy_len;
+	u8 *src;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
+	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
+		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
+		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
+				      *hcsum);
+		eddp->skb_offset += len;
+	} else {
+		while (len > 0) {
+			if (eddp->frag < 0) {
+				/* we're in skb->data */
+				left_in_frag = qeth_get_skb_data_len(eddp->skb)
+						- eddp->skb_offset;
+				src = eddp->skb->data + eddp->skb_offset;
+			} else {
+				frag = &skb_shinfo(eddp->skb)->
+					frags[eddp->frag];
+				left_in_frag = frag->size - eddp->frag_offset;
+				src = (u8 *)(
+					(page_to_pfn(frag->page) << PAGE_SHIFT)+
+					frag->page_offset + eddp->frag_offset);
+			}
+			if (left_in_frag <= 0) {
+				eddp->frag++;
+				eddp->frag_offset = 0;
+				continue;
+			}
+			copy_len = min(left_in_frag, len);
+			memcpy(dst, src, copy_len);
+			*hcsum = csum_partial(src, copy_len, *hcsum);
+			dst += copy_len;
+			eddp->frag_offset += copy_len;
+			eddp->skb_offset += copy_len;
+			len -= copy_len;
+		}
+	}
+}
+
+static inline void
+qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
+				  struct qeth_eddp_data *eddp, int data_len,
+				  u32 hcsum)
+{
+	u8 *page;
+	int page_remainder;
+	int page_offset;
+	struct qeth_eddp_element *element;
+	int first_lap = 1;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
+	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
+	page_offset = ctx->offset % PAGE_SIZE;
+	element = &ctx->elements[ctx->num_elements];
+	while (data_len){
+		page_remainder = PAGE_SIZE - page_offset;
+		if (page_remainder < data_len){
+			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
+						page_remainder, &hcsum);
+			element->length += page_remainder;
+			if (first_lap)
+				element->flags = SBAL_FLAGS_FIRST_FRAG;
+			else
+				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
+			ctx->num_elements++;
+			element++;
+			data_len -= page_remainder;
+			ctx->offset += page_remainder;
+			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
+			page_offset = 0;
+			element->addr = page + page_offset;
+		} else {
+			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
+						data_len, &hcsum);
+			element->length += data_len;
+			if (!first_lap)
+				element->flags = SBAL_FLAGS_LAST_FRAG;
+			ctx->num_elements++;
+			ctx->offset += data_len;
+			data_len = 0;
+		}
+		first_lap = 0;
+	}
+	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
+}
+
+static inline u32
+qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
+{
+	u32 phcsum; /* pseudo header checksum */
+
+	QETH_DBF_TEXT(trace, 5, "eddpckt4");
+	eddp->th.tcp.h.check = 0;
+	/* compute pseudo header checksum */
+	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
+				    eddp->thl + data_len, IPPROTO_TCP, 0);
+	/* compute checksum of tcp header */
+	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
+}
+
+static inline u32
+qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
+{
+	u32 proto;
+	u32 phcsum; /* pseudo header checksum */
+
+	QETH_DBF_TEXT(trace, 5, "eddpckt6");
+	eddp->th.tcp.h.check = 0;
+	/* compute pseudo header checksum */
+	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
+			      sizeof(struct in6_addr), 0);
+	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
+			      sizeof(struct in6_addr), phcsum);
+	proto = htonl(IPPROTO_TCP);
+	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
+	return phcsum;
+}
+
+static inline struct qeth_eddp_data *
+qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
+{
+	struct qeth_eddp_data *eddp;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcrda");
+	eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
+	if (eddp){
+		memset(eddp, 0, sizeof(struct qeth_eddp_data));
+		eddp->nhl = nhl;
+		eddp->thl = thl;
+		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
+		memcpy(&eddp->nh, nh, nhl);
+		memcpy(&eddp->th, th, thl);
+		eddp->frag = -1; /* initially we're in skb->data */
+	}
+	return eddp;
+}
+
+static inline void
+__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
+			     struct qeth_eddp_data *eddp)
+{
+	struct tcphdr *tcph;
+	int data_len;
+	u32 hcsum;
+
+	QETH_DBF_TEXT(trace, 5, "eddpftcp");
+	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
+	tcph = eddp->skb->h.th;
+	while (eddp->skb_offset < eddp->skb->len) {
+		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
+			       (int)(eddp->skb->len - eddp->skb_offset));
+		/* prepare qdio hdr */
+		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
+			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
+						     eddp->nhl + eddp->thl -
+						     sizeof(struct qeth_hdr);
+#ifdef CONFIG_QETH_VLAN
+			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
+				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
+#endif /* CONFIG_QETH_VLAN */
+		} else
+			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
+						 eddp->thl;
+		/* prepare ip hdr */
+		if (eddp->skb->protocol == ETH_P_IP){
+			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
+						 eddp->thl;
+			eddp->nh.ip4.h.check = 0;
+			eddp->nh.ip4.h.check =
+				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
+						eddp->nh.ip4.h.ihl);
+		} else
+			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
+		/* prepare tcp hdr */
+		if (data_len == (eddp->skb->len - eddp->skb_offset)){
+			/* last segment -> set FIN and PSH flags */
+			eddp->th.tcp.h.fin = tcph->fin;
+			eddp->th.tcp.h.psh = tcph->psh;
+		}
+		if (eddp->skb->protocol == ETH_P_IP)
+			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
+		else
+			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
+		/* fill the next segment into the context */
+		qeth_eddp_create_segment_hdrs(ctx, eddp);
+		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
+		if (eddp->skb_offset >= eddp->skb->len)
+			break;
+		/* prepare headers for next round */
+		if (eddp->skb->protocol == ETH_P_IP)
+			eddp->nh.ip4.h.id++;
+		eddp->th.tcp.h.seq += data_len;
+	}
+}
+
+static inline int
+qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
+			   struct sk_buff *skb, struct qeth_hdr *qhdr)
+{
+	struct qeth_eddp_data *eddp = NULL;
+
+	QETH_DBF_TEXT(trace, 5, "eddpficx");
+	/* create our segmentation headers and copy original headers */
+	if (skb->protocol == ETH_P_IP)
+		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
+				skb->nh.iph->ihl*4,
+				(u8 *)skb->h.th, skb->h.th->doff*4);
+	else
+		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
+				sizeof(struct ipv6hdr),
+				(u8 *)skb->h.th, skb->h.th->doff*4);
+
+	if (eddp == NULL) {
+		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
+		return -ENOMEM;
+	}
+	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
+		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
+#ifdef CONFIG_QETH_VLAN
+		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
+			eddp->vlan[0] = __constant_htons(skb->protocol);
+			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
+		}
+#endif /* CONFIG_QETH_VLAN */
+	}
+	/* the next flags will only be set on the last segment */
+	eddp->th.tcp.h.fin = 0;
+	eddp->th.tcp.h.psh = 0;
+	eddp->skb = skb;
+	/* begin segmentation and fill context */
+	__qeth_eddp_fill_context_tcp(ctx, eddp);
+	kfree(eddp);
+	return 0;
+}
+
+static inline void
+qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
+			 int hdr_len)
+{
+	int skbs_per_page;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcanp");
+	/* can we put multiple skbs in one page? */
+	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
+	if (skbs_per_page > 1){
+		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
+				 skbs_per_page + 1;
+		ctx->elements_per_skb = 1;
+	} else {
+		/* no -> how many elements per skb? */
+		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
+				     PAGE_SIZE) >> PAGE_SHIFT;
+		ctx->num_pages = ctx->elements_per_skb *
+				 (skb_shinfo(skb)->tso_segs + 1);
+	}
+	ctx->num_elements = ctx->elements_per_skb *
+			    (skb_shinfo(skb)->tso_segs + 1);
+}
+
+static inline struct qeth_eddp_context *
+qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
+				 int hdr_len)
+{
+	struct qeth_eddp_context *ctx = NULL;
+	u8 *addr;
+	int i;
+
+	QETH_DBF_TEXT(trace, 5, "creddpcg");
+	/* create the context and allocate pages */
+	ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
+	if (ctx == NULL){
+		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
+		return NULL;
+	}
+	memset(ctx, 0, sizeof(struct qeth_eddp_context));
+	ctx->type = QETH_LARGE_SEND_EDDP;
+	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
+	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){
+		QETH_DBF_TEXT(trace, 2, "ceddpcis");
+		kfree(ctx);
+		return NULL;
+	}
+	ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
+	if (ctx->pages == NULL){
+		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
+		kfree(ctx);
+		return NULL;
+	}
+	memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
+	for (i = 0; i < ctx->num_pages; ++i){
+		addr = (u8 *)__get_free_page(GFP_ATOMIC);
+		if (addr == NULL){
+			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
+			ctx->num_pages = i;
+			qeth_eddp_free_context(ctx);
+			return NULL;
+		}
+		memset(addr, 0, PAGE_SIZE);
+		ctx->pages[i] = addr;
+	}
+	ctx->elements = kmalloc(ctx->num_elements *
+				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
+	if (ctx->elements == NULL){
+		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
+		qeth_eddp_free_context(ctx);
+		return NULL;
+	}
+	memset(ctx->elements, 0,
+	       ctx->num_elements * sizeof(struct qeth_eddp_element));
+	/* reset num_elements; will be incremented again in
+	 * qeth_eddp_create_segment_data_tcp to reflect actually used elements */
+	ctx->num_elements = 0;
+	return ctx;
+}
+
+static inline struct qeth_eddp_context *
+qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
+			     struct qeth_hdr *qhdr)
+{
+	struct qeth_eddp_context *ctx = NULL;
+
+	QETH_DBF_TEXT(trace, 5, "creddpct");
+	if (skb->protocol == ETH_P_IP)
+		ctx = qeth_eddp_create_context_generic(card, skb,
+			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
+			skb->h.th->doff*4);
+	else if (skb->protocol == ETH_P_IPV6)
+		ctx = qeth_eddp_create_context_generic(card, skb,
+			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
+			skb->h.th->doff*4);
+	else
+		QETH_DBF_TEXT(trace, 2, "cetcpinv");
+
+	if (ctx == NULL) {
+		QETH_DBF_TEXT(trace, 2, "creddpnl");
+		return NULL;
+	}
+	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)){
+		QETH_DBF_TEXT(trace, 2, "ceddptfe");
+		qeth_eddp_free_context(ctx);
+		return NULL;
+	}
+	atomic_set(&ctx->refcnt, 1);
+	return ctx;
+}
+
+struct qeth_eddp_context *
+qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
+			 struct qeth_hdr *qhdr)
+{
+	QETH_DBF_TEXT(trace, 5, "creddpc");
+	switch (skb->sk->sk_protocol){
+	case IPPROTO_TCP:
+		return qeth_eddp_create_context_tcp(card, skb, qhdr);
+	default:
+		QETH_DBF_TEXT(trace, 2, "eddpinvp");
+	}
+	return NULL;
+}
+
+
diff -urpN linux-2.6/drivers/s390/net/qeth_eddp.h linux-2.6-patched/drivers/s390/net/qeth_eddp.h
--- linux-2.6/drivers/s390/net/qeth_eddp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/qeth_eddp.h	2005-03-24 14:03:05.000000000 +0100
@@ -0,0 +1,85 @@
+/*
+ * linux/drivers/s390/net/qeth_eddp.h ($Revision: 1.5 $)
+ *
+ * Header file for qeth enhanced device driver packing.
+ *
+ * Copyright 2004 IBM Corporation
+ *
+ *    Author(s): Thomas Spatzier <tspat@xxxxxxxxxx>
+ *
+ *    $Revision: 1.5 $	 $Date: 2005/03/24 09:04:18 $
+ *
+ */
+#ifndef __QETH_EDDP_H__
+#define __QETH_EDDP_H__
+
+struct qeth_eddp_element {
+	u32 flags;
+	u32 length;
+	void *addr;
+};
+
+struct qeth_eddp_context {
+	atomic_t refcnt;
+	enum qeth_large_send_types type;
+	int num_pages;			    /* # of allocated pages */
+	u8 **pages;			    /* pointers to pages */
+	int offset;			    /* offset in ctx during creation */
+	int num_elements;		    /* # of required 'SBALEs' */
+	struct qeth_eddp_element *elements; /* array of 'SBALEs' */
+	int elements_per_skb;		    /* # of 'SBALEs' per skb */
+};
+
+struct qeth_eddp_context_reference {
+	struct list_head list;
+	struct qeth_eddp_context *ctx;
+};
+
+extern struct qeth_eddp_context *
+qeth_eddp_create_context(struct qeth_card *,struct sk_buff *,struct qeth_hdr *);
+
+extern void
+qeth_eddp_put_context(struct qeth_eddp_context *);
+
+extern int
+qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,struct qeth_eddp_context *,int);
+
+extern void
+qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
+
+extern int
+qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
+				    struct qeth_eddp_context *);
+/*
+ * Data used for fragmenting a IP packet.
+ */
+struct qeth_eddp_data {
+	struct qeth_hdr qh;
+	struct ethhdr mac;
+	u16 vlan[2];
+	union {
+		struct {
+			struct iphdr h;
+			u8 options[40];
+		} ip4;
+		struct {
+			struct ipv6hdr h;
+		} ip6;
+	} nh;
+	u8 nhl;
+	void *nh_in_ctx;	/* address of nh within the ctx */
+	union {
+		struct {
+			struct tcphdr h;
+			u8 options[40];
+		} tcp;
+	} th;
+	u8 thl;
+	void *th_in_ctx;	/* address of th within the ctx */
+	struct sk_buff *skb;
+	int skb_offset;
+	int frag;
+	int frag_offset;
+} __attribute__ ((packed));
+
+#endif /* __QETH_EDDP_H__ */
diff -urpN linux-2.6/drivers/s390/net/qeth.h linux-2.6-patched/drivers/s390/net/qeth.h
--- linux-2.6/drivers/s390/net/qeth.h	2005-03-24 14:03:04.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/qeth.h	2005-03-24 14:03:05.000000000 +0100
@@ -68,7 +68,8 @@
 #define QETH_DBF_TRACE_LEN 8
 #define QETH_DBF_TRACE_INDEX 2
 #define QETH_DBF_TRACE_NR_AREAS 2
-#define QETH_DBF_TRACE_LEVEL 5
+#define QETH_DBF_TRACE_LEVEL 3
+extern debug_info_t *qeth_dbf_trace;
 
 #define QETH_DBF_SENSE_NAME "qeth_sense"
 #define QETH_DBF_SENSE_LEN 64
@@ -206,6 +207,11 @@ struct qeth_perf_stats {
 	__u64 outbound_do_qdio_start_time;
 	unsigned int outbound_do_qdio_cnt;
 	unsigned int outbound_do_qdio_time;
+	/* eddp data */
+	unsigned int large_send_bytes;
+	unsigned int large_send_cnt;
+	unsigned int sg_skbs_sent;
+	unsigned int sg_frags_sent;
 };
 #endif /* CONFIG_QETH_PERF_STATS */
 
@@ -334,8 +340,8 @@ qeth_is_ipa_enabled(struct qeth_ipa_info
 struct qeth_hdr_layer3 {
 	__u8  id;
 	__u8  flags;
-	__u16 inbound_checksum;
-	__u32 token;
+	__u16 inbound_checksum; /*TSO:__u16 seqno */
+	__u32 token;		/*TSO: __u32 reserved */
 	__u16 length;
 	__u8  vlan_prio;
 	__u8  ext_flags;
@@ -386,6 +392,7 @@ enum qeth_layer2_frame_flags {
 enum qeth_header_ids {
 	QETH_HEADER_TYPE_LAYER3 = 0x01,
 	QETH_HEADER_TYPE_LAYER2 = 0x02,
+	QETH_HEADER_TYPE_TSO	= 0x03,
 };
 /* flags for qeth_hdr.ext_flags */
 #define QETH_HDR_EXT_VLAN_FRAME       0x01
@@ -394,6 +401,7 @@ enum qeth_header_ids {
 #define QETH_HDR_EXT_SRC_MAC_ADDR     0x08
 #define QETH_HDR_EXT_CSUM_HDR_REQ     0x10
 #define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
+#define QETH_HDR_EXT_UDP_TSO          0x40 /*bit off for TCP*/
 
 static inline int
 qeth_is_last_sbale(struct qdio_buffer_element *sbale)
@@ -448,11 +456,19 @@ struct qeth_qdio_q {
 	volatile int next_buf_to_init;
 } __attribute__ ((aligned(256)));
 
+/* possible types of qeth large_send support */
+enum qeth_large_send_types {
+	QETH_LARGE_SEND_NO,
+	QETH_LARGE_SEND_EDDP,
+	QETH_LARGE_SEND_TSO,
+};
+
 struct qeth_qdio_out_buffer {
 	struct qdio_buffer *buffer;
 	atomic_t state;
 	volatile int next_element_to_fill;
 	struct sk_buff_head skb_list;
+	struct list_head ctx_list;
 };
 
 struct qeth_card;
@@ -713,6 +729,7 @@ struct qeth_card_options {
 	int add_hhlen;
 	int fake_ll;
 	int layer2;
+	enum qeth_large_send_types large_send;
 };
 
 /*
@@ -799,6 +816,57 @@ qeth_get_ipa_adp_type(enum qeth_link_typ
 }
 
 inline static int
+qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
+{
+	struct sk_buff *new_skb = NULL;
+
+	if (skb_headroom(*skb) < size){
+		new_skb = skb_realloc_headroom(*skb, size);
+		if (!new_skb) {
+                        PRINT_ERR("qeth_prepare_skb: could "
+                                  "not realloc headroom for qeth_hdr "
+                                  "on interface %s", QETH_CARD_IFNAME(card));
+                        return -ENOMEM;
+                }
+                *skb = new_skb;
+	}
+	return 0;
+}
+static inline struct sk_buff *
+qeth_pskb_unshare(struct sk_buff *skb, int pri)
+{
+        struct sk_buff *nskb;
+        if (!skb_cloned(skb))
+                return skb;
+        nskb = skb_copy(skb, pri);
+        kfree_skb(skb); /* free our shared copy */
+        return nskb;
+}
+
+
+inline static void *
+qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
+{
+        void *hdr;
+
+	hdr = (void *) skb_push(*skb, size);
+        /*
+         * sanity check, the Linux memory allocation scheme should
+         * never present us cases like this one (the qdio header size plus
+         * the first 40 bytes of the packet cross a 4k boundary)
+         */
+        if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
+            (((unsigned long) hdr + size +
+              QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
+                PRINT_ERR("qeth_prepare_skb: misaligned "
+                          "packet on interface %s. Discarded.",
+                          QETH_CARD_IFNAME(card));
+                return NULL;
+        }
+        return hdr;
+}
+
+inline static int
 qeth_get_hlen(__u8 link_type)
 {
 #ifdef CONFIG_QETH_IPV6
@@ -839,17 +907,6 @@ qeth_get_netdev_flags(struct qeth_card *
 #endif
 	}
 }
-static inline struct sk_buff *
-qeth_pskb_unshare(struct sk_buff *skb, int pri)
-{
-        struct sk_buff *nskb;
-        if (!skb_cloned(skb))
-                return skb;
-        nskb = skb_copy(skb, pri);
-        kfree_skb(skb); /* free our shared copy */
-        return nskb;
-}
-
 
 inline static int
 qeth_get_initial_mtu_for_card(struct qeth_card * card)
@@ -1093,4 +1150,13 @@ qeth_schedule_recovery(struct qeth_card 
 extern int
 qeth_realloc_buffer_pool(struct qeth_card *, int);
 
+extern int
+qeth_set_large_send(struct qeth_card *);
+
+extern void
+qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
+		 struct sk_buff *, int, int);
+extern void
+qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int);
+
 #endif /* __QETH_H__ */
diff -urpN linux-2.6/drivers/s390/net/qeth_main.c linux-2.6-patched/drivers/s390/net/qeth_main.c
--- linux-2.6/drivers/s390/net/qeth_main.c	2005-03-24 14:03:04.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/qeth_main.c	2005-03-24 14:03:05.000000000 +0100
@@ -61,6 +61,7 @@ qeth_eyecatcher(void)
 #include <linux/reboot.h>
 #include <linux/mii.h>
 #include <linux/rcupdate.h>
+#include <linux/ethtool.h>
 
 #include <net/arp.h>
 #include <net/ip.h>
@@ -76,6 +77,8 @@ qeth_eyecatcher(void)
 #include "qeth.h"
 #include "qeth_mpc.h"
 #include "qeth_fs.h"
+#include "qeth_eddp.h"
+#include "qeth_tso.h"
 
 #define VERSION_QETH_C "$Revision: 1.206 $"
 static const char *version = "qeth S/390 OSA-Express driver";
@@ -87,7 +90,7 @@ static debug_info_t *qeth_dbf_setup = NU
 static debug_info_t *qeth_dbf_data = NULL;
 static debug_info_t *qeth_dbf_misc = NULL;
 static debug_info_t *qeth_dbf_control = NULL;
-static debug_info_t *qeth_dbf_trace = NULL;
+debug_info_t *qeth_dbf_trace = NULL;
 static debug_info_t *qeth_dbf_sense = NULL;
 static debug_info_t *qeth_dbf_qerr = NULL;
 
@@ -1748,7 +1751,7 @@ qeth_send_control_data_cb(struct qeth_ch
 							(unsigned long)iob);
 			}
 			if (cmd)
-				reply->rc = (s16) cmd->hdr.return_code;
+				reply->rc = (u16) cmd->hdr.return_code;
 			else if (iob->rc)
 				reply->rc = iob->rc;
 			if (keep_reply) {
@@ -2461,14 +2464,15 @@ qeth_clear_output_buffer(struct qeth_qdi
 	if (buf->buffer->element[0].flags & 0x40)
 		atomic_dec(&queue->set_pci_flags_count);
 
+	while ((skb = skb_dequeue(&buf->skb_list))){
+		atomic_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+	}
+	qeth_eddp_buf_release_contexts(buf);
 	for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
 		buf->buffer->element[i].length = 0;
 		buf->buffer->element[i].addr = NULL;
 		buf->buffer->element[i].flags = 0;
-		while ((skb = skb_dequeue(&buf->skb_list))){
-			atomic_dec(&skb->users);
-			dev_kfree_skb_any(skb);
-		}
 	}
 	buf->next_element_to_fill = 0;
 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
@@ -2618,7 +2622,7 @@ qeth_handle_send_error(struct qeth_card 
 	return QETH_SEND_ERROR_LINK_FAILURE;
 }
 
-static inline void
+void
 qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
 		   int index, int count)
 {
@@ -2659,9 +2663,6 @@ qeth_flush_buffers(struct qeth_qdio_out_
 				atomic_inc(&queue->set_pci_flags_count);
 				buf->buffer->element[0].flags |= 0x40;
 			}
-#ifdef CONFIG_QETH_PERF_STATS
-			queue->card->perf_stats.bufs_sent_pack++;
-#endif
 		}
 	}
 
@@ -2783,6 +2784,7 @@ qeth_check_outbound_queue(struct qeth_qd
 {
 	int index;
 	int flush_cnt = 0;
+	int q_was_packing = 0;
 
 	/*
 	 * check if weed have to switch to non-packing mode or if
@@ -2797,20 +2799,22 @@ qeth_check_outbound_queue(struct qeth_qd
 			 * do_send_packet. So, we check if there is a
 			 * packing buffer to be flushed here.
 			 */
-			/* TODO: try if we get a performance improvement
-			 * by calling netif_stop_queue here */
-			/* save start index for flushing */
+			netif_stop_queue(queue->card->dev);
 			index = queue->next_buf_to_fill;
+			q_was_packing = queue->do_pack;
 			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
 			if (!flush_cnt &&
 			    !atomic_read(&queue->set_pci_flags_count))
 				flush_cnt +=
 					qeth_flush_buffers_on_no_pci(queue);
-			/* were done with updating critical queue members */
-			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
-			/* flushing can be done outside the lock */
+#ifdef CONFIG_QETH_PERF_STATS
+			if (q_was_packing)
+				queue->card->perf_stats.bufs_sent_pack +=
+					flush_cnt;
+#endif
 			if (flush_cnt)
 				qeth_flush_buffers(queue, 1, index, flush_cnt);
+			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 		}
 	}
 }
@@ -2840,7 +2844,6 @@ qeth_qdio_output_handler(struct ccw_devi
 			return;
 		}
 	}
-
 #ifdef CONFIG_QETH_PERF_STATS
 	card->perf_stats.outbound_handler_cnt++;
 	card->perf_stats.outbound_handler_start_time = qeth_get_micros();
@@ -2861,7 +2864,7 @@ qeth_qdio_output_handler(struct ccw_devi
 	if (card->info.type != QETH_CARD_TYPE_IQD)
 		qeth_check_outbound_queue(queue);
 
-	netif_wake_queue(card->dev);
+	netif_wake_queue(queue->card->dev);
 #ifdef CONFIG_QETH_PERF_STATS
 	card->perf_stats.outbound_handler_time += qeth_get_micros() -
 		card->perf_stats.outbound_handler_start_time;
@@ -2940,7 +2943,7 @@ qeth_alloc_buffer_pool(struct qeth_card 
 	void *ptr;
 	int i, j;
 
-	QETH_DBF_TEXT(trace,5,"clwkpool");
+	QETH_DBF_TEXT(trace,5,"alocpool");
 	for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
 	 	pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
 		if (!pool_entry){
@@ -3028,12 +3031,13 @@ qeth_alloc_qdio_buffers(struct qeth_card
 		QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
 		memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
 		card->qdio.out_qs[i]->queue_no = i;
-		/* give inbound qeth_qdio_buffers their qdio_buffers */
+		/* give outbound qeth_qdio_buffers their qdio_buffers */
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
 			card->qdio.out_qs[i]->bufs[j].buffer =
 				&card->qdio.out_qs[i]->qdio_bufs[j];
 			skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
 					    skb_list);
+			INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
 		}
 	}
 	card->qdio.state = QETH_QDIO_ALLOCATED;
@@ -3477,24 +3481,19 @@ qeth_hard_start_xmit(struct sk_buff *skb
 	card->perf_stats.outbound_cnt++;
 	card->perf_stats.outbound_start_time = qeth_get_micros();
 #endif
-	/*
-	 * We only call netif_stop_queue in case of errors. Since we've
-	 * got our own synchronization on queues we can keep the stack's
-	 * queue running.
-	 */
-	if ((rc = qeth_send_packet(card, skb))){
+	netif_stop_queue(dev);
+	if ((rc = qeth_send_packet(card, skb))) {
 		if (rc == -EBUSY) {
-			netif_stop_queue(dev);
-			rc = NETDEV_TX_BUSY;
+			return NETDEV_TX_BUSY;
 		} else {
 			card->stats.tx_errors++;
 			card->stats.tx_dropped++;
 			dev_kfree_skb_any(skb);
-			/* set to OK; otherwise ksoftirqd goes to 100% */
+			/* set to OK; otherwise ksoftirqd goes to 100% */
 			rc = NETDEV_TX_OK;
 		}
 	}
-
+	netif_wake_queue(dev);
 #ifdef CONFIG_QETH_PERF_STATS
 	card->perf_stats.outbound_time += qeth_get_micros() -
 		card->perf_stats.outbound_start_time;
@@ -3711,22 +3710,16 @@ static inline int
 qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
 		 struct qeth_hdr **hdr, int ipv)
 {
-	struct sk_buff *new_skb;
+	int rc = 0;
 #ifdef CONFIG_QETH_VLAN
 	u16 *tag;
 #endif
 
 	QETH_DBF_TEXT(trace, 6, "prepskb");
-	if (skb_headroom(*skb) < sizeof(struct qeth_hdr)){
-		new_skb = skb_realloc_headroom(*skb, sizeof(struct qeth_hdr));
-		if (!new_skb) {
-			PRINT_ERR("qeth_prepare_skb: could "
-				  "not realloc headroom for qeth_hdr "
-				  "on interface %s", QETH_CARD_IFNAME(card));
-			return -ENOMEM;
-		}
-		*skb = new_skb;
-	}
+
+	rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
+	if (rc)
+		return rc;
 #ifdef CONFIG_QETH_VLAN
 	if (card->vlangrp && vlan_tx_tag_present(*skb) &&
 	    ((ipv == 6) || card->options.layer2) ) {
@@ -3748,20 +3741,10 @@ qeth_prepare_skb(struct qeth_card *card,
 		*(tag + 1) = htons(vlan_tx_tag_get(*skb));
 	}
 #endif
-	*hdr = (struct qeth_hdr *) skb_push(*skb, sizeof(struct qeth_hdr));
-	/*
-	 * sanity check, the Linux memory allocation scheme should
-	 * never present us cases like this one (the 32bytes header plus
-	 * the first 40 bytes of the paket cross a 4k boundary)
-	 */
-	if ((((unsigned long) *hdr) & (~(PAGE_SIZE - 1))) !=
-	    (((unsigned long) *hdr + sizeof(struct qeth_hdr) +
-	      QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
-		PRINT_ERR("qeth_prepare_skb: misaligned "
-			  "packet on interface %s. Discarded.",
-			  QETH_CARD_IFNAME(card));
+	*hdr = (struct qeth_hdr *)
+		qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
+	if (*hdr == NULL)
 		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -3853,7 +3836,7 @@ qeth_layer2_fill_header(struct qeth_card
 #endif
 }
 
-static inline void
+void
 qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		struct sk_buff *skb, int ipv, int cast_type)
 {
@@ -3910,21 +3893,59 @@ qeth_fill_header(struct qeth_card *card,
 	}
 }
 
-static inline int
-qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf,
-		 char *data, struct sk_buff *skb)
+static inline void
+__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
+			int *next_element_to_fill)
+{
+	int length = skb->len;
+	struct skb_frag_struct *frag;
+	int fragno;
+	unsigned long addr;
+	int element;
+	int first_lap = 1;
+
+	fragno = skb_shinfo(skb)->nr_frags; /* start with last frag */
+	element = *next_element_to_fill + fragno;
+	while (length > 0) {
+		if (fragno > 0) {
+			frag = &skb_shinfo(skb)->frags[fragno - 1];
+			addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
+				frag->page_offset;
+			buffer->element[element].addr = (char *)addr;
+			buffer->element[element].length = frag->size;
+			length -= frag->size;
+			if (first_lap)
+				buffer->element[element].flags =
+				    SBAL_FLAGS_LAST_FRAG;
+			else
+				buffer->element[element].flags =
+				    SBAL_FLAGS_MIDDLE_FRAG;
+		} else {
+			buffer->element[element].addr = skb->data;
+			buffer->element[element].length = length;
+			length = 0;
+			buffer->element[element].flags =
+				SBAL_FLAGS_FIRST_FRAG;
+		}
+		element--;
+		fragno--;
+		first_lap = 0;
+	}
+	*next_element_to_fill += skb_shinfo(skb)->nr_frags + 1;
+}
+
+static inline void
+__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
+		   int *next_element_to_fill)
 {
-	struct qdio_buffer *buffer;
 	int length = skb->len;
 	int length_here;
 	int element;
+	char *data;
 	int first_lap = 1;
 
-	QETH_DBF_TEXT(trace, 6, "qdfillbf");
-	buffer = buf->buffer;
-	atomic_inc(&skb->users);
-	skb_queue_tail(&buf->skb_list, skb);
-	element = buf->next_element_to_fill;
+	element = *next_element_to_fill;
+	data = skb->data;
 	while (length > 0) {
 		/* length_here is the remaining amount of data in this page */
 		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
@@ -3951,11 +3972,33 @@ qeth_fill_buffer(struct qeth_qdio_out_q 
 		element++;
 		first_lap = 0;
 	}
-	buf->next_element_to_fill = element;
+	*next_element_to_fill = element;
+}
+
+static inline int
+qeth_fill_buffer(struct qeth_qdio_out_q *queue,
+		 struct qeth_qdio_out_buffer *buf,
+		 struct sk_buff *skb)
+{
+	struct qdio_buffer *buffer;
+	int flush_cnt = 0;
+
+	QETH_DBF_TEXT(trace, 6, "qdfillbf");
+	buffer = buf->buffer;
+	atomic_inc(&skb->users);
+	skb_queue_tail(&buf->skb_list, skb);
+	if (skb_shinfo(skb)->nr_frags == 0)
+		__qeth_fill_buffer(skb, buffer,
+				   (int *)&buf->next_element_to_fill);
+	else
+		__qeth_fill_buffer_frag(skb, buffer,
+					(int *)&buf->next_element_to_fill);
+
 	if (!queue->do_pack) {
 		QETH_DBF_TEXT(trace, 6, "fillbfnp");
 		/* set state to PRIMED -> will be flushed */
 		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+		flush_cnt = 1;
 	} else {
 		QETH_DBF_TEXT(trace, 6, "fillbfpa");
 #ifdef CONFIG_QETH_PERF_STATS
@@ -3968,17 +4011,21 @@ qeth_fill_buffer(struct qeth_qdio_out_q 
 			 * -> will be flushed
 			 */
 			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			flush_cnt = 1;
 		}
 	}
-	return 0;
+	return flush_cnt;
 }
 
 static inline int
 qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			 struct sk_buff *skb, struct qeth_hdr *hdr,
-			 int elements_needed)
+			 int elements_needed,
+			 struct qeth_eddp_context *ctx)
 {
 	struct qeth_qdio_out_buffer *buffer;
+	int buffers_needed = 0;
+	int flush_cnt = 0;
 	int index;
 
 	QETH_DBF_TEXT(trace, 6, "dosndpfa");
@@ -3999,22 +4046,42 @@ qeth_do_send_packet_fast(struct qeth_car
 		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 		return -EBUSY;
 	}
-	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
-				  QDIO_MAX_BUFFERS_PER_Q;
+	if (ctx == NULL)
+		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+					  QDIO_MAX_BUFFERS_PER_Q;
+	else {
+		buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
+		if (buffers_needed < 0) {
+			card->stats.tx_dropped++;
+			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+			return -EBUSY;
+		}
+		queue->next_buf_to_fill =
+			(queue->next_buf_to_fill + buffers_needed) %
+			QDIO_MAX_BUFFERS_PER_Q;
+	}
 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
-	qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
-	qeth_flush_buffers(queue, 0, index, 1);
+	if (ctx == NULL) {
+		qeth_fill_buffer(queue, buffer, skb);
+		qeth_flush_buffers(queue, 0, index, 1);
+	} else {
+		flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
+		WARN_ON(buffers_needed != flush_cnt);
+		qeth_flush_buffers(queue, 0, index, flush_cnt);
+	}
 	return 0;
 }
 
 static inline int
 qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 		    struct sk_buff *skb, struct qeth_hdr *hdr,
-		    int elements_needed)
+		    int elements_needed, struct qeth_eddp_context *ctx)
 {
 	struct qeth_qdio_out_buffer *buffer;
 	int start_index;
 	int flush_count = 0;
+	int do_pack = 0;
+	int tmp;
 	int rc = 0;
 
 	QETH_DBF_TEXT(trace, 6, "dosndpkt");
@@ -4037,34 +4104,56 @@ qeth_do_send_packet(struct qeth_card *ca
 	/* check if we need to switch packing state of this queue */
 	qeth_switch_to_packing_if_needed(queue);
 	if (queue->do_pack){
-		/* does packet fit in current buffer? */
-		if((QETH_MAX_BUFFER_ELEMENTS(card) -
-		    buffer->next_element_to_fill) < elements_needed){
-			/* ... no -> set state PRIMED */
-			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
-			flush_count++;
-			queue->next_buf_to_fill =
-				(queue->next_buf_to_fill + 1) %
-				QDIO_MAX_BUFFERS_PER_Q;
-			buffer = &queue->bufs[queue->next_buf_to_fill];
-			/* we did a step forward, so check buffer state again */
-			if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
+		do_pack = 1;
+		if (ctx == NULL) {
+			/* does packet fit in current buffer? */
+			if((QETH_MAX_BUFFER_ELEMENTS(card) -
+			    buffer->next_element_to_fill) < elements_needed){
+				/* ... no -> set state PRIMED */
+				atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
+				flush_count++;
+				queue->next_buf_to_fill =
+					(queue->next_buf_to_fill + 1) %
+					QDIO_MAX_BUFFERS_PER_Q;
+				buffer = &queue->bufs[queue->next_buf_to_fill];
+				/* we did a step forward, so check buffer state
+				 * again */
+				if (atomic_read(&buffer->state) !=
+						QETH_QDIO_BUF_EMPTY){
+					card->stats.tx_dropped++;
+					qeth_flush_buffers(queue, 0, start_index, flush_count);
+					atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+					return -EBUSY;
+				}
+			}
+		} else {
+			/* check if we have enough elements (including following
+			 * free buffers) to handle eddp context */
+			if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
+				QETH_DBF_TEXT(trace, 2, "eddpdrp1");
 				card->stats.tx_dropped++;
-				/* return EBUSY because we sent old packet, not
-				 * the current one */
 				rc = -EBUSY;
-				atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 				goto out;
 			}
 		}
 	}
-	qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
-	if (atomic_read(&buffer->state) == QETH_QDIO_BUF_PRIMED){
-		/* next time fill the next buffer */
-		flush_count++;
-		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
-			QDIO_MAX_BUFFERS_PER_Q;
+	if (ctx == NULL)
+		tmp = qeth_fill_buffer(queue, buffer, skb);
+	else {
+		tmp = qeth_eddp_fill_buffer(queue, ctx, queue->next_buf_to_fill);
+		if (tmp < 0) {
+			QETH_DBF_TEXT(trace, 2, "eddpdrp2");
+			card->stats.tx_dropped++;
+			rc = -EBUSY;
+			goto out;
+		}
 	}
+	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
+				  QDIO_MAX_BUFFERS_PER_Q;
+	flush_count += tmp;
+out:
+	if (flush_count)
+		qeth_flush_buffers(queue, 0, start_index, flush_count);
 	/*
 	 * queue->state will go from LOCKED -> UNLOCKED or from
 	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
@@ -4072,6 +4161,8 @@ qeth_do_send_packet(struct qeth_card *ca
 	 * In that case we will enter this loop
 	 */
 	while (atomic_dec_return(&queue->state)){
+		flush_count = 0;
+		start_index = queue->next_buf_to_fill;
 		/* check if we can go back to non-packing state */
 		flush_count += qeth_switch_to_nonpacking_if_needed(queue);
 		/*
@@ -4080,11 +4171,14 @@ qeth_do_send_packet(struct qeth_card *ca
 		 */
 		if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
 			flush_count += qeth_flush_buffers_on_no_pci(queue);
+		if (flush_count)
+			qeth_flush_buffers(queue, 0, start_index, flush_count);
 	}
 	/* at this point the queue is UNLOCKED again */
-out:
-	if (flush_count)
-		qeth_flush_buffers(queue, 0, start_index, flush_count);
+#ifdef CONFIG_QETH_PERF_STATS
+	if (do_pack)
+		queue->card->perf_stats.bufs_sent_pack += flush_count;
+#endif /* CONFIG_QETH_PERF_STATS */
 
 	return rc;
 }
@@ -4096,7 +4190,9 @@ qeth_send_packet(struct qeth_card *card,
 	int cast_type;
 	struct qeth_qdio_out_q *queue;
 	struct qeth_hdr *hdr;
-	int elements_needed;
+	int elements_needed = 0;
+	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
+	struct qeth_eddp_context *ctx = NULL;
 	int rc;
 
 	QETH_DBF_TEXT(trace, 6, "sendpkt");
@@ -4122,29 +4218,64 @@ qeth_send_packet(struct qeth_card *card,
 	queue = card->qdio.out_qs
 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
 
+	if (skb_shinfo(skb)->tso_size)
+		large_send = card->options.large_send;
+
 	if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){
-		QETH_DBF_TEXT_(trace, 4, "1err%d", rc);
+		QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
 		return rc;
 	}
+	/* Are we able to do TSO? If so, prepare and send it from here. */
+	if ((large_send == QETH_LARGE_SEND_TSO) &&
+	    (cast_type == RTN_UNSPEC)) {
+		rc = qeth_tso_send_packet(card, skb, queue,
+					  ipv, cast_type);
+		goto do_statistics;
+	}
+
 	qeth_fill_header(card, hdr, skb, ipv, cast_type);
-	elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) + skb->len)
-				>> PAGE_SHIFT);
-	if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
-		PRINT_ERR("qeth_do_send_packet: invalid size of "
-				 "IP packet. Discarded.");
-		return -EINVAL;
+	if (large_send == QETH_LARGE_SEND_EDDP) {
+		ctx = qeth_eddp_create_context(card, skb, hdr);
+		if (ctx == NULL) {
+			PRINT_WARN("could not create eddp context\n");
+			return -EINVAL;
+		}
+	} else {
+		elements_needed = qeth_get_elements_no(card,(void*) hdr, skb);
+		if (!elements_needed)
+			return -EINVAL;
 	}
 
 	if (card->info.type != QETH_CARD_TYPE_IQD)
 		rc = qeth_do_send_packet(card, queue, skb, hdr,
-					 elements_needed);
+					 elements_needed, ctx);
 	else
 		rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
-					      elements_needed);
-
+					      elements_needed, ctx);
+do_statistics:
 	if (!rc){
 		card->stats.tx_packets++;
 		card->stats.tx_bytes += skb->len;
+#ifdef CONFIG_QETH_PERF_STATS
+		if (skb_shinfo(skb)->tso_size) {
+			card->perf_stats.large_send_bytes += skb->len;
+			card->perf_stats.large_send_cnt++;
+		}
+		if (skb_shinfo(skb)->nr_frags > 0){
+			card->perf_stats.sg_skbs_sent++;
+			/* nr_frags + skb->data */
+			card->perf_stats.sg_frags_sent +=
+				skb_shinfo(skb)->nr_frags + 1;
+		}
+#endif /* CONFIG_QETH_PERF_STATS */
+	}
+	if (ctx != NULL) {
+		/* drop creator's reference */
+		qeth_eddp_put_context(ctx);
+		/* free skb; it's not referenced by a buffer */
+		if (rc == 0)
+			dev_kfree_skb_any(skb);
+
 	}
 	return rc;
 }
@@ -4999,6 +5130,7 @@ out:
 static void
 qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
 {
+#ifdef CONFIG_QETH_IPV6
 	struct inet6_dev *in6_dev;
 	struct inet6_ifaddr *ifa;
 	struct qeth_ipaddr *addr;
@@ -5021,6 +5153,7 @@ qeth_free_vlan_addresses6(struct qeth_ca
 		}
 	}
 	in6_dev_put(in6_dev);
+#endif /* CONFIG_QETH_IPV6 */
 }
 
 static void
@@ -5620,7 +5753,7 @@ static int
 qeth_layer3_register_addr_entry(struct qeth_card *card,
 				struct qeth_ipaddr *addr)
 {
-	//char buf[50];
+	char buf[50];
 	int rc;
 	int cnt = 3;
 
@@ -5646,12 +5779,9 @@ qeth_layer3_register_addr_entry(struct q
 	} while ((--cnt > 0) && rc);
 	if (rc){
 		QETH_DBF_TEXT(trace, 2, "FAILED");
-		/* TODO: re-activate this warning as soon as we have a
-		 * clean mirco code
 		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
-		PRINT_WARN("Could not register IP address %s (rc=%x)\n",
-			   buf, rc);
-		*/
+		PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
+			   buf, rc, rc);
 	}
 	return rc;
 }
@@ -5709,6 +5839,111 @@ qeth_deregister_addr_entry(struct qeth_c
 	return qeth_layer3_deregister_addr_entry(card, addr);
 }
 
+static u32
+qeth_ethtool_get_tx_csum(struct net_device *dev)
+{
+	/* We may need to say that we support tx csum offload if
+	 * we do EDDP or TSO. There are discussions going on to
+	 * enforce rules in the stack and in ethtool that make
+	 * SG and TSO depend on HW_CSUM. At the moment there are
+	 * no such rules....
+	 * If we say yes here, we have to checksum outbound packets
+	 * any time. */
+	return 0;
+}
+
+static int
+qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
+{
+	return -EINVAL;
+}
+
+static u32
+qeth_ethtool_get_rx_csum(struct net_device *dev)
+{
+	struct qeth_card *card = (struct qeth_card *)dev->priv;
+
+	return (card->options.checksum_type == HW_CHECKSUMMING);
+}
+
+static int
+qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct qeth_card *card = (struct qeth_card *)dev->priv;
+
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER))
+		return -EPERM;
+	if (data)
+		card->options.checksum_type = HW_CHECKSUMMING;
+	else
+		card->options.checksum_type = SW_CHECKSUMMING;
+	return 0;
+}
+
+static u32
+qeth_ethtool_get_sg(struct net_device *dev)
+{
+	struct qeth_card *card = (struct qeth_card *)dev->priv;
+
+	return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
+		(dev->features & NETIF_F_SG));
+}
+
+static int
+qeth_ethtool_set_sg(struct net_device *dev, u32 data)
+{
+	struct qeth_card *card = (struct qeth_card *)dev->priv;
+
+	if (data) {
+		if (card->options.large_send != QETH_LARGE_SEND_NO)
+			dev->features |= NETIF_F_SG;
+		else {
+			dev->features &= ~NETIF_F_SG;
+			return -EINVAL;
+		}
+	} else
+		dev->features &= ~NETIF_F_SG;
+	return 0;
+}
+
+static u32
+qeth_ethtool_get_tso(struct net_device *dev)
+{
+	struct qeth_card *card = (struct qeth_card *)dev->priv;
+
+	return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
+		(dev->features & NETIF_F_TSO));
+}
+
+static int
+qeth_ethtool_set_tso(struct net_device *dev, u32 data)
+{
+	struct qeth_card *card = (struct qeth_card *)dev->priv;
+
+	if (data) {
+		if (card->options.large_send != QETH_LARGE_SEND_NO)
+			dev->features |= NETIF_F_TSO;
+		else {
+			dev->features &= ~NETIF_F_TSO;
+			return -EINVAL;
+		}
+	} else
+		dev->features &= ~NETIF_F_TSO;
+	return 0;
+}
+
+static struct ethtool_ops qeth_ethtool_ops = {
+	.get_tx_csum = qeth_ethtool_get_tx_csum,
+	.set_tx_csum = qeth_ethtool_set_tx_csum,
+	.get_rx_csum = qeth_ethtool_get_rx_csum,
+	.set_rx_csum = qeth_ethtool_set_rx_csum,
+	.get_sg      = qeth_ethtool_get_sg,
+	.set_sg      = qeth_ethtool_set_sg,
+	.get_tso     = qeth_ethtool_get_tso,
+	.set_tso     = qeth_ethtool_set_tso,
+};
+
 static int
 qeth_netdev_init(struct net_device *dev)
 {
@@ -5758,6 +5993,8 @@ qeth_netdev_init(struct net_device *dev)
 	dev->addr_len = OSA_ADDR_LEN;
 	dev->mtu = card->info.initial_mtu;
 
+	SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
+
 	SET_MODULE_OWNER(dev);
 	return 0;
 }
@@ -6149,6 +6386,9 @@ qeth_query_ipassists_cb(struct qeth_card
 		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
 #endif
 	}
+	QETH_DBF_TEXT(setup, 2, "suppenbl");
+	QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported);
+	QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled);
 	return 0;
 }
 
@@ -6549,26 +6789,33 @@ qeth_start_ipa_checksum(struct qeth_card
 	return rc;
 }
 
-/*
-static inline void
-qeth_print_ipassist_status(struct qeth_card *card)
+static int
+qeth_start_ipa_tso(struct qeth_card *card)
 {
-	char buf[255];
-	int offset = 0;
+	int rc;
+
+	QETH_DBF_TEXT(trace,3,"sttso");
 
-	offset += sprintf(buf, "IPAssist options of %s: ", card->info.if_name);
-	if (qeth_is_enabled(card, IPA_ARP_PROCESSING))
-		offset += sprintf(buf+offset, "ARP ");
-	if (qeth_is_enabled(card, IPA_IP_FRAGMENTATION))
-		offset += sprintf(buf+offset, "IP_FRAG");
-	if (qeth_is_enabled(card, IPA_SOURCE_MAC))
-		offset += sprintf(buf+offset, "SRC_MAC");
-	if (qeth_is_enabled(card, IPA_FULL_VLAN))
-		offset += sprintf(buf+offset, "VLAN");
-	if (qeth_is_enabled(card, IPA_VLAN_PRIO))
-		offset += sprintf(buf+offset, "VLAN_PRIO");
+	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+		PRINT_WARN("Outbound TSO not supported on %s\n",
+			   QETH_CARD_IFNAME(card));
+		rc = -EOPNOTSUPP;
+	} else {
+		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+						  IPA_CMD_ASS_START,0);
+		if (rc)
+			PRINT_WARN("Could not start outbound TSO "
+				   "assist on %s: rc=%i\n",
+				   QETH_CARD_IFNAME(card), rc);
+		else
+			PRINT_INFO("Outbound TSO enabled\n");
+	}
+	if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
+		card->options.large_send = QETH_LARGE_SEND_NO;
+		card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
+	}
+	return rc;
 }
-*/
 
 static int
 qeth_start_ipassists(struct qeth_card *card)
@@ -6582,6 +6829,7 @@ qeth_start_ipassists(struct qeth_card *c
 	qeth_start_ipa_ipv6(card);		/* go on*/
 	qeth_start_ipa_broadcast(card);		/* go on*/
 	qeth_start_ipa_checksum(card);		/* go on*/
+	qeth_start_ipa_tso(card);		/* go on*/
 	return 0;
 }
 
@@ -6693,6 +6941,40 @@ qeth_setrouting_v6(struct qeth_card *car
 	return rc;
 }
 
+int
+qeth_set_large_send(struct qeth_card *card)
+{
+	int rc = 0;
+
+	if (card->dev == NULL)
+		return 0;
+
+	netif_stop_queue(card->dev);
+	switch (card->options.large_send) {
+	case QETH_LARGE_SEND_EDDP:
+		card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+		break;
+	case QETH_LARGE_SEND_TSO:
+		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
+			card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+		} else {
+			PRINT_WARN("TSO not supported on %s. "
+				   "large_send set to 'no'.\n",
+				   card->dev->name);
+			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+			card->options.large_send = QETH_LARGE_SEND_NO;
+			rc = -EOPNOTSUPP;
+		}
+		break;
+	default: /* includes QETH_LARGE_SEND_NO */
+		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+		break;
+	}
+
+	netif_wake_queue(card->dev);
+	return rc;
+}
+
 /*
  * softsetup card: init IPA stuff
  */
@@ -6730,6 +7012,12 @@ qeth_softsetup_card(struct qeth_card *ca
 #endif
 		goto out;
 	}
+	if ((card->options.large_send == QETH_LARGE_SEND_EDDP) ||
+	    (card->options.large_send == QETH_LARGE_SEND_TSO))
+		card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+	else
+		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+
 	if ((rc = qeth_setadapter_parms(card)))
 		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
 	if ((rc = qeth_start_ipassists(card)))
diff -urpN linux-2.6/drivers/s390/net/qeth_mpc.h linux-2.6-patched/drivers/s390/net/qeth_mpc.h
--- linux-2.6/drivers/s390/net/qeth_mpc.h	2005-03-02 08:38:08.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/qeth_mpc.h	2005-03-24 14:03:05.000000000 +0100
@@ -14,7 +14,7 @@
 
 #include <asm/qeth.h>
 
-#define VERSION_QETH_MPC_H "$Revision: 1.38 $"
+#define VERSION_QETH_MPC_H "$Revision: 1.43 $"
 
 extern const char *VERSION_QETH_MPC_C;
 
@@ -182,6 +182,9 @@ enum qeth_ipa_funcs {
 	IPA_FULL_VLAN           = 0x00004000L,
 	IPA_SOURCE_MAC          = 0x00010000L,
 	IPA_OSA_MC_ROUTER       = 0x00020000L,
+	IPA_QUERY_ARP_ASSIST    = 0x00040000L,
+	IPA_INBOUND_TSO         = 0x00080000L,
+	IPA_OUTBOUND_TSO        = 0x00100000L,
 };
 
 /* SETIP/DELIP IPA Command: ***************************************************/
diff -urpN linux-2.6/drivers/s390/net/qeth_proc.c linux-2.6-patched/drivers/s390/net/qeth_proc.c
--- linux-2.6/drivers/s390/net/qeth_proc.c	2005-03-02 08:38:10.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/qeth_proc.c	2005-03-24 14:03:05.000000000 +0100
@@ -236,6 +236,14 @@ qeth_perf_procfile_seq_show(struct seq_f
 		   card->perf_stats.skbs_sent_pack,
 		   card->perf_stats.bufs_sent_pack
 		  );
+	seq_printf(s, "  Skbs sent in SG mode                   : %i\n"
+		      "  Skb fragments sent in SG mode          : %i\n\n",
+		      card->perf_stats.sg_skbs_sent,
+		      card->perf_stats.sg_frags_sent);
+	seq_printf(s, "  large_send tx (in Kbytes)              : %i\n"
+		      "  large_send count                       : %i\n\n",
+		      card->perf_stats.large_send_bytes >> 10,
+		      card->perf_stats.large_send_cnt);
 	seq_printf(s, "  Packing state changes no pkg.->packing : %i/%i\n"
 		      "  Watermarks L/H                         : %i/%i\n"
 		      "  Current buffer usage (outbound q's)    : "
@@ -262,7 +270,7 @@ qeth_perf_procfile_seq_show(struct seq_f
 		      "  Outbound time (in us, incl QDIO)       : %i\n"
 		      "  Outbound count                         : %i\n"
 		      "  Outbound do_QDIO time (in us)          : %i\n"
-		      "  Outbound do_QDIO count                 : %i\n",
+		      "  Outbound do_QDIO count                 : %i\n\n",
 		        card->perf_stats.inbound_time,
 			card->perf_stats.inbound_cnt,
 		        card->perf_stats.inbound_do_qdio_time,
@@ -274,7 +282,6 @@ qeth_perf_procfile_seq_show(struct seq_f
 		        card->perf_stats.outbound_do_qdio_time,
 			card->perf_stats.outbound_do_qdio_cnt
 		  );
-
 	return 0;
 }
 
diff -urpN linux-2.6/drivers/s390/net/qeth_sys.c linux-2.6-patched/drivers/s390/net/qeth_sys.c
--- linux-2.6/drivers/s390/net/qeth_sys.c	2005-03-24 14:03:05.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/qeth_sys.c	2005-03-24 14:03:05.000000000 +0100
@@ -742,6 +742,61 @@ static DEVICE_ATTR(layer2, 0644, qeth_de
 		   qeth_dev_layer2_store);
 
 static ssize_t
+qeth_dev_large_send_show(struct device *dev, char *buf)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	switch (card->options.large_send) {
+	case QETH_LARGE_SEND_NO:
+		return sprintf(buf, "%s\n", "no");
+	case QETH_LARGE_SEND_EDDP:
+		return sprintf(buf, "%s\n", "EDDP");
+	case QETH_LARGE_SEND_TSO:
+		return sprintf(buf, "%s\n", "TSO");
+	default:
+		return sprintf(buf, "%s\n", "N/A");
+	}
+}
+
+static ssize_t
+qeth_dev_large_send_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+	enum qeth_large_send_types type;
+	int rc = 0;
+	char *tmp;
+
+	if (!card)
+		return -EINVAL;
+
+	tmp = strsep((char **) &buf, "\n");
+
+	if (!strcmp(tmp, "no")){
+		type = QETH_LARGE_SEND_NO;
+	} else if (!strcmp(tmp, "EDDP")) {
+		type = QETH_LARGE_SEND_EDDP;
+	} else if (!strcmp(tmp, "TSO")) {
+		type = QETH_LARGE_SEND_TSO;
+	} else {
+		PRINT_WARN("large_send: invalid mode %s!\n", tmp);
+		return -EINVAL;
+	}
+	if (card->options.large_send == type)
+		return count;
+	card->options.large_send = type;
+	if ((rc = qeth_set_large_send(card)))
+		return rc;
+
+	return count;
+}
+
+static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
+		   qeth_dev_large_send_store);
+
+static ssize_t
 qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value )
 {
 
@@ -875,6 +930,7 @@ static struct device_attribute * qeth_de
 	&dev_attr_broadcast_mode,
 	&dev_attr_canonical_macaddr,
 	&dev_attr_layer2,
+	&dev_attr_large_send,
 	NULL,
 };
 
diff -urpN linux-2.6/drivers/s390/net/qeth_tso.c linux-2.6-patched/drivers/s390/net/qeth_tso.c
--- linux-2.6/drivers/s390/net/qeth_tso.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/qeth_tso.c	2005-03-24 14:03:05.000000000 +0100
@@ -0,0 +1,285 @@
+/*
+ * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $)
+ *
+ * TCP Segmentation Offload support for the qeth driver.
+ *
+ * Copyright 2004 IBM Corporation
+ *
+ *    Author(s): Frank Pavlic <pavlic@xxxxxxxxxx>
+ *
+ *    $Revision: 1.6 $	 $Date: 2005/03/24 09:04:18 $
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "qeth.h"
+#include "qeth_mpc.h"
+#include "qeth_tso.h"
+
+/**
+ * Prepare an skb that is already partially prepared:
+ * the classic qdio header is already in skb->data.
+ */
+static inline struct qeth_hdr_tso *
+qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(trace, 5, "tsoprsk");
+	rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr_ext_tso));
+	if (rc)
+		return NULL;
+
+	return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
+}
+
+/**
+ * fill header for a TSO packet
+ */
+static inline void
+qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
+{
+	struct qeth_hdr_tso *hdr;
+	struct tcphdr *tcph;
+	struct iphdr *iph;
+
+	QETH_DBF_TEXT(trace, 5, "tsofhdr");
+
+	hdr  = (struct qeth_hdr_tso *) skb->data;
+	iph  = skb->nh.iph;
+	tcph = skb->h.th;
+	/*fix header to TSO values ...*/
+	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	/*set values which are fix for the first approach ...*/
+	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
+	hdr->ext.imb_hdr_no  = 1;
+	hdr->ext.hdr_type    = 1;
+	hdr->ext.hdr_version = 1;
+	hdr->ext.hdr_len     = 28;
+	/*insert non-fix values */
+	hdr->ext.mss = skb_shinfo(skb)->tso_size;
+	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+				       sizeof(struct qeth_hdr_tso));
+}
+
+/**
+ * change some header values as requested by hardware
+ */
+static inline void
+qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct ipv6hdr *ip6h;
+	struct tcphdr *tcph;
+
+	iph  = skb->nh.iph;
+	ip6h = skb->nh.ipv6h;
+	tcph = skb->h.th;
+
+	tcph->check = 0;
+	if (skb->protocol == ETH_P_IPV6) {
+		ip6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					       0, IPPROTO_TCP, 0);
+		return;
+	}
+	/* OSA wants us to set these values ... */
+	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					 0, IPPROTO_TCP, 0);
+	iph->tot_len = 0;
+	iph->check = 0;
+}
+
+static inline struct qeth_hdr_tso *
+qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
+			int ipv, int cast_type)
+{
+	struct qeth_hdr_tso *hdr;
+	int rc = 0;
+
+	QETH_DBF_TEXT(trace, 5, "tsoprep");
+
+	/*get headroom for tso qdio header */
+	hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
+	if (hdr == NULL) {
+		QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
+		return NULL;
+	}
+	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
+	/* Fill the first 32 bytes of the qdio header as usual.
+	 * FIXME: TSO has two struct members
+	 * with different names but the same size.
+	 */
+	qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
+	qeth_tso_fill_header(card, skb);
+	qeth_tso_set_tcpip_header(card, skb);
+	return hdr;
+}
+
+static inline int
+qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
+{
+	struct qeth_qdio_out_buffer *buffer;
+	int flush_cnt = 0;
+
+	QETH_DBF_TEXT(trace, 5, "tsobuf");
+
+	/* force to non-packing*/
+	if (queue->do_pack)
+		queue->do_pack = 0;
+	buffer = &queue->bufs[queue->next_buf_to_fill];
+	/* get a new buffer if current is already in use*/
+	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
+	    (buffer->next_element_to_fill > 0)) {
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+					  QDIO_MAX_BUFFERS_PER_Q;
+		flush_cnt++;
+	}
+	return flush_cnt;
+}
+
+static inline void
+__qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf,
+			  struct sk_buff *skb)
+{
+	struct skb_frag_struct *frag;
+	struct qdio_buffer *buffer;
+	int fragno, cnt, element;
+	unsigned long addr;
+
+	QETH_DBF_TEXT(trace, 6, "tsfilfrg");
+
+	/*initialize variables ...*/
+	fragno = skb_shinfo(skb)->nr_frags;
+	buffer = buf->buffer;
+	element = buf->next_element_to_fill;
+	/*fill buffer elements .....*/
+	for (cnt = 0; cnt < fragno; cnt++) {
+		frag = &skb_shinfo(skb)->frags[cnt];
+		addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
+			frag->page_offset;
+		buffer->element[element].addr = (char *)addr;
+		buffer->element[element].length = frag->size;
+		if (cnt < (fragno - 1))
+			buffer->element[element].flags =
+				SBAL_FLAGS_MIDDLE_FRAG;
+		else
+			buffer->element[element].flags =
+				SBAL_FLAGS_LAST_FRAG;
+		element++;
+	}
+	buf->next_element_to_fill = element;
+}
+
+/*
+ * Fill one output buffer with a TSO skb whose data already starts with
+ * a struct qeth_hdr_tso (built by qeth_tso_prepare_packet).
+ *
+ * The first SBAL element carries only the TSO header plus the datagram
+ * protocol headers (hdr_len bytes); the payload follows either via the
+ * page fragments (__qeth_tso_fill_buffer_frag) or, for a linear skb,
+ * split at page boundaries into subsequent elements.
+ *
+ * The skb is reference-counted and queued on the buffer so it can be
+ * freed after transmission completes.  The buffer is left in the
+ * PRIMED state; always returns 1 (the number of buffers primed).
+ */
+static inline int
+qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
+		     struct sk_buff *skb)
+{
+        int length, length_here, element;
+        int hdr_len;
+	struct qdio_buffer *buffer;
+	struct qeth_hdr_tso *hdr;
+	char *data;
+
+        QETH_DBF_TEXT(trace, 3, "tsfilbuf");
+
+	/*increment user count and queue skb ...*/
+        atomic_inc(&skb->users);
+        skb_queue_tail(&buf->skb_list, skb);
+
+	/*initialize all variables...*/
+        buffer = buf->buffer;
+	hdr = (struct qeth_hdr_tso *)skb->data;
+	/* header element covers the TSO header and the L3/L4 headers */
+	hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
+	data = skb->data + hdr_len;
+	length = skb->len - hdr_len;
+        element = buf->next_element_to_fill;
+	/*fill first buffer entry only with header information */
+	buffer->element[element].addr = skb->data;
+	buffer->element[element].length = hdr_len;
+	buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
+	buf->next_element_to_fill++;
+
+	/* fragmented skb: payload lives in page frags, not in the linear
+	 * part — hand off to the frag mapper and skip the loop below */
+	if (skb_shinfo(skb)->nr_frags > 0) {
+                 __qeth_tso_fill_buffer_frag(buf, skb);
+                 goto out;
+        }
+
+       /*start filling buffer entries ...*/
+        element++;
+        while (length > 0) {
+                /* length_here is the remaining amount of data in this page */
+		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
+		if (length < length_here)
+                        length_here = length;
+                buffer->element[element].addr = data;
+                buffer->element[element].length = length_here;
+                length -= length_here;
+                if (!length)
+                        buffer->element[element].flags =
+                                SBAL_FLAGS_LAST_FRAG;
+                 else
+                         buffer->element[element].flags =
+                                 SBAL_FLAGS_MIDDLE_FRAG;
+                data += length_here;
+                element++;
+        }
+        /*set the buffer to primed  ...*/
+        buf->next_element_to_fill = element;
+out:
+	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+        return 1;
+}
+
+/*
+ * Transmit a TSO skb on the given output queue.
+ *
+ * Builds the TSO header in front of the skb, verifies the frame fits
+ * into a single SBAL, then busy-waits for the queue lock, forces
+ * non-packing mode, fills exactly one buffer and flushes whatever
+ * became ready.
+ *
+ * Returns -ENOMEM if header preparation failed, -EINVAL if the skb
+ * needs more elements than one SBAL provides, 0 otherwise — including
+ * the card-busy drop paths (see note at the statistics below).
+ */
+int
+qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
+		     struct qeth_qdio_out_q *queue, int ipv, int cast_type)
+{
+	int flush_cnt = 0;
+	struct qeth_hdr_tso *hdr;
+	struct qeth_qdio_out_buffer *buffer;
+        int start_index;
+
+	QETH_DBF_TEXT(trace, 3, "tsosend");
+
+	if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
+	     	return -ENOMEM;
+	/*check if skb fits in one SBAL ...*/
+	if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
+		return -EINVAL;
+	/*lock queue, force switching to non-packing and send it ...*/
+	/* NOTE(review): spin-loop on the queue state word — relies on the
+	 * holder releasing quickly; no relaxation/backoff here. */
+	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
+                                       QETH_OUT_Q_LOCKED,
+                                       &queue->state));
+        start_index = queue->next_buf_to_fill;
+        buffer = &queue->bufs[queue->next_buf_to_fill];
+	/*check if card is too busy ...*/
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
+		card->stats.tx_dropped++;
+		goto out;
+	}
+	/*let's force to non-packing and get a new SBAL*/
+	flush_cnt += qeth_tso_get_queue_buffer(queue);
+	buffer = &queue->bufs[queue->next_buf_to_fill];
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
+		card->stats.tx_dropped++;
+		goto out;
+	}
+	flush_cnt += qeth_tso_fill_buffer(buffer, skb);
+	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+				   QDIO_MAX_BUFFERS_PER_Q;
+out:
+	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+	if (flush_cnt)
+		qeth_flush_buffers(queue, 0, start_index, flush_cnt);
+	/*do some statistics */
+	/* NOTE(review): this is reached on the tx_dropped paths too, so a
+	 * dropped skb is also counted in tx_packets/tx_bytes, 0 (success)
+	 * is returned, and the skb is not freed here — confirm the callers
+	 * and the accounting are intentional. */
+	card->stats.tx_packets++;
+	card->stats.tx_bytes += skb->len;
+	return 0;
+}
diff -urpN linux-2.6/drivers/s390/net/qeth_tso.h linux-2.6-patched/drivers/s390/net/qeth_tso.h
--- linux-2.6/drivers/s390/net/qeth_tso.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6-patched/drivers/s390/net/qeth_tso.h	2005-03-24 14:03:05.000000000 +0100
@@ -0,0 +1,58 @@
+/*
+ * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.4 $)
+ *
+ * Header file for qeth TCP Segmentation Offload support.
+ *
+ * Copyright 2004 IBM Corporation
+ *
+ *    Author(s): Frank Pavlic <pavlic@xxxxxxxxxx>
+ *
+ *    $Revision: 1.4 $	 $Date: 2005/03/24 09:04:18 $
+ *
+ */
+#ifndef __QETH_TSO_H__
+#define __QETH_TSO_H__
+
+
+extern int
+qeth_tso_send_packet(struct qeth_card *, struct sk_buff *,
+		     struct qeth_qdio_out_q *, int , int);
+
+/*
+ * TSO extension that follows the regular qeth header on the wire.
+ * The layout is interpreted by the adapter, hence the explicit packing;
+ * field semantics beyond dg_hdr_len are hardware-defined — confirm
+ * against the OSA Express documentation before relying on them.
+ */
+struct qeth_hdr_ext_tso {
+        __u16 hdr_tot_len;
+        __u8  imb_hdr_no;
+        __u8  reserved;
+        __u8  hdr_type;
+        __u8  hdr_version;
+        __u16 hdr_len;
+        __u32 payload_len;
+        __u16 mss;		/* maximum segment size for segmentation */
+        __u16 dg_hdr_len;	/* length of the datagram (L3/L4) headers
+				 * that follow this structure in skb->data */
+        __u8  padding[16];
+} __attribute__ ((packed));
+
+/*
+ * Complete TSO transmit header prepended to skb->data: the regular
+ * qeth header followed immediately by the TSO extension.
+ */
+struct qeth_hdr_tso {
+        struct qeth_hdr hdr; 	/*hdr->hdr.l3.xxx*/
+	struct qeth_hdr_ext_tso ext;
+} __attribute__ ((packed));
+
+/*some helper functions*/
+
+/*
+ * Work out how many SBAL elements the skb will occupy: one per page
+ * fragment plus one for the header element, or — for a purely linear
+ * skb — one element per page boundary crossed by the data starting at
+ * hdr.  Returns 0 (after logging an error) when the frame would not
+ * fit into a single SBAL of this card.
+ */
+static inline int
+qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
+{
+	int frag_cnt = skb_shinfo(skb)->nr_frags;
+	int cnt;
+
+	if (frag_cnt > 0)
+		cnt = frag_cnt + 1;
+	else
+		cnt = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
+			    + skb->len) >> PAGE_SHIFT);
+	if (cnt > QETH_MAX_BUFFER_ELEMENTS(card)) {
+		PRINT_ERR("qeth_do_send_packet: invalid size of "
+			  "IP packet. Discarded.");
+		return 0;
+	}
+	return cnt;
+}
+#endif /* __QETH_TSO_H__ */
-
To unsubscribe from this list: send the line "unsubscribe linux-net" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html

[Index of Archives]     [Netdev]     [Ethernet Bridging]     [Linux 802.1Q VLAN]     [Linux Wireless]     [Kernel Newbies]     [Security]     [Linux for Hams]     [Netfilter]     [Git]     [Bugtraq]     [Yosemite News and Information]     [MIPS Linux]     [ARM Linux]     [Linux RAID]     [Linux PCI]     [Linux Admin]     [Samba]

  Powered by Linux