[PATCH] kfree cleanup: drivers/s390
/*
 *
 * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.13 $)
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 *    $Revision: 1.13 $  $Date: 2005/05/04 20:19:18 $
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"

int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
                                    struct qeth_eddp_context *ctx)
{
        int index = queue->next_buf_to_fill;
        int elements_needed = ctx->num_elements;
        int elements_in_buffer;
        int skbs_in_buffer;
        int buffers_needed = 0;

        QETH_DBF_TEXT(trace, 5, "eddpcbfc");
        while (elements_needed > 0) {
                buffers_needed++;
                if (atomic_read(&queue->bufs[index].state) !=
                                QETH_QDIO_BUF_EMPTY)
                        return -EBUSY;

                elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                     queue->bufs[index].next_element_to_fill;
                skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
                elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
                index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        return buffers_needed;
}
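
/*
 * Usage sketch (illustrative only; the caller-side code is an
 * assumption, not part of this file): the send path would pre-check
 * the ring before committing a context, roughly:
 *
 *      int needed = qeth_eddp_check_buffers_for_context(queue, ctx);
 *      if (needed < 0)
 *              return -EBUSY;   (ring blocked, retry later)
 *
 * On success the return value is the number of output buffers the
 * context will occupy, counted from queue->next_buf_to_fill.
 */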

static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfctx");
        for (i = 0; i < ctx->num_pages; ++i)
                free_page((unsigned long)ctx->pages[i]);
        kfree(ctx->pages);
        kfree(ctx->elements);
        kfree(ctx);
}

static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
        atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
        if (atomic_dec_return(&ctx->refcnt) == 0)
                qeth_eddp_free_context(ctx);
}
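
/*
 * Lifetime rule implied by the helpers above: a context starts with
 * refcnt == 1 (set in qeth_eddp_create_context_tcp() below); every
 * buffer that carries part of it takes an extra reference via
 * qeth_eddp_buf_ref_context(), and the pages are freed only when the
 * last qeth_eddp_put_context() drops the count to zero.
 */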

void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprctx");
        while (!list_empty(&buf->ctx_list)) {
                ref = list_entry(buf->ctx_list.next,
                                 struct qeth_eddp_context_reference, list);
                qeth_eddp_put_context(ref->ctx);
                list_del(&ref->list);
                kfree(ref);
        }
}

static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
                          struct qeth_eddp_context *ctx)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprfcx");
        ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
        if (ref == NULL)
                return -ENOMEM;
        qeth_eddp_get_context(ctx);
        ref->ctx = ctx;
        list_add_tail(&ref->list, &buf->ctx_list);
        return 0;
}

int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
                      struct qeth_eddp_context *ctx,
                      int index)
{
        struct qeth_qdio_out_buffer *buf = NULL;
        struct qdio_buffer *buffer;
        int elements = ctx->num_elements;
        int element = 0;
        int flush_cnt = 0;
        int must_refcnt = 1;
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfibu");
        while (elements > 0) {
                buf = &queue->bufs[index];
                if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
                        /* normally this should not happen since we checked
                         * for available elements in
                         * qeth_eddp_check_buffers_for_context
                         */
                        if (element == 0)
                                return -EBUSY;
                        else {
                                PRINT_WARN("could only partially fill eddp "
                                           "buffer!\n");
                                goto out;
                        }
                }
                /* check if the whole next skb fits into current buffer */
                if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                        buf->next_element_to_fill)
                                < ctx->elements_per_skb) {
                        /* no -> go to next buffer */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
                        flush_cnt++;
                        /* new buffer, so we have to add ctx to the buffer's
                         * ctx_list and increment ctx's refcnt */
                        must_refcnt = 1;
                        continue;
                }
                if (must_refcnt) {
                        must_refcnt = 0;
                        if (qeth_eddp_buf_ref_context(buf, ctx)) {
                                PRINT_WARN("no memory to create eddp context "
                                           "reference\n");
                                goto out_check;
                        }
                }
                buffer = buf->buffer;
                /* fill one skb into buffer */
                for (i = 0; i < ctx->elements_per_skb; ++i) {
                        buffer->element[buf->next_element_to_fill].addr =
                                ctx->elements[element].addr;
                        buffer->element[buf->next_element_to_fill].length =
                                ctx->elements[element].length;
                        buffer->element[buf->next_element_to_fill].flags =
                                ctx->elements[element].flags;
                        buf->next_element_to_fill++;
                        element++;
                        elements--;
                }
        }
out_check:
        if (!queue->do_pack) {
                QETH_DBF_TEXT(trace, 6, "fillbfnp");
                /* set state to PRIMED -> will be flushed */
                if (buf->next_element_to_fill > 0) {
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        } else {
#ifdef CONFIG_QETH_PERF_STATS
                queue->card->perf_stats.skbs_sent_pack++;
#endif
                QETH_DBF_TEXT(trace, 6, "fillbfpa");
                if (buf->next_element_to_fill >=
                                QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
                        /*
                         * packed buffer is full -> set state PRIMED
                         * -> will be flushed
                         */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        }
out:
        return flush_cnt;
}
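
/*
 * Note on the return value (assumed caller behaviour, sketched for
 * illustration): the function returns how many buffers reached PRIMED
 * state, and the caller is expected to flush that many to the device,
 * roughly:
 *
 *      flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
 *      if (flush_cnt > 0)
 *              qeth_flush_buffers(queue, ..., flush_cnt);
 *
 * qeth_flush_buffers() and its parameters are an assumption borrowed
 * from the surrounding qeth code, shown only to illustrate how
 * flush_cnt is meant to be consumed.
 */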

static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
                              struct qeth_eddp_data *eddp, int data_len)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        int pkt_len;
        struct qeth_eddp_element *element;

        QETH_DBF_TEXT(trace, 5, "eddpcrsh");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        pkt_len = eddp->nhl + eddp->thl + data_len;
        /* FIXME: layer2 and VLAN !!! */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                pkt_len += ETH_HLEN;
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                pkt_len += VLAN_HLEN;
        /* does the complete packet fit in the current page? */
        page_remainder = PAGE_SIZE - page_offset;
        if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
                /* no -> go to start of next page */
                ctx->offset += page_remainder;
                page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                page_offset = 0;
        }
        memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
        element->addr = page + page_offset;
        element->length = sizeof(struct qeth_hdr);
        ctx->offset += sizeof(struct qeth_hdr);
        page_offset += sizeof(struct qeth_hdr);
        /* add mac header (?) */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
                element->length += ETH_HLEN;
                ctx->offset += ETH_HLEN;
                page_offset += ETH_HLEN;
        }
        /* add VLAN tag */
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
                element->length += VLAN_HLEN;
                ctx->offset += VLAN_HLEN;
                page_offset += VLAN_HLEN;
        }
        /* add network header */
        memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
        element->length += eddp->nhl;
        eddp->nh_in_ctx = page + page_offset;
        ctx->offset += eddp->nhl;
        page_offset += eddp->nhl;
        /* add transport header */
        memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
        element->length += eddp->thl;
        eddp->th_in_ctx = page + page_offset;
        ctx->offset += eddp->thl;
}
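
/*
 * Resulting layout of one segment header inside the context pages
 * (recovered from the copies above; MAC and VLAN parts only in
 * layer-2 mode):
 *
 *      +----------+-----------+------------+---------+---------+
 *      | qeth_hdr | mac (opt) | vlan (opt) | net hdr | tcp hdr |
 *      +----------+-----------+------------+---------+---------+
 *      ^ element->addr; element->length covers all of the above.
 *        The payload is appended by qeth_eddp_create_segment_data_tcp().
 */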

static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
                        u32 *hcsum)
{
        struct skb_frag_struct *frag;
        int left_in_frag;
        int copy_len;
        u8 *src;

        QETH_DBF_TEXT(trace, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
                memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
        } else {
                while (len > 0) {
                        if (eddp->frag < 0) {
                                /* we're in skb->data */
                                left_in_frag = (eddp->skb->len - eddp->skb->data_len)
                                                - eddp->skb_offset;
                                src = eddp->skb->data + eddp->skb_offset;
                        } else {
                                frag = &skb_shinfo(eddp->skb)->
                                        frags[eddp->frag];
                                left_in_frag = frag->size - eddp->frag_offset;
                                src = (u8 *)(
                                        (page_to_pfn(frag->page) << PAGE_SHIFT) +
                                        frag->page_offset + eddp->frag_offset);
                        }
                        if (left_in_frag <= 0) {
                                eddp->frag++;
                                eddp->frag_offset = 0;
                                continue;
                        }
                        copy_len = min(left_in_frag, len);
                        memcpy(dst, src, copy_len);
                        *hcsum = csum_partial(src, copy_len, *hcsum);
                        dst += copy_len;
                        eddp->frag_offset += copy_len;
                        eddp->skb_offset += copy_len;
                        len -= copy_len;
                }
        }
}
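
/*
 * Checksum bookkeeping, as implied by the code above: *hcsum is a
 * running csum_partial() accumulator, seeded with the pseudo-header
 * and TCP-header sum from qeth_eddp_check_tcp4_hdr()/_tcp6_hdr().
 * Because the one's-complement sum can be accumulated over
 * concatenated buffers, the payload is folded in piecewise, one
 * chunk/frag at a time, and reduced to 16 bits only once the whole
 * segment has been copied (the csum_fold() call at the end of
 * qeth_eddp_create_segment_data_tcp() below).
 */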

static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
                                  struct qeth_eddp_data *eddp, int data_len,
                                  u32 hcsum)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        struct qeth_eddp_element *element;
        int first_lap = 1;

        QETH_DBF_TEXT(trace, 5, "eddpcsdt");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        while (data_len) {
                page_remainder = PAGE_SIZE - page_offset;
                if (page_remainder < data_len) {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                page_remainder, &hcsum);
                        element->length += page_remainder;
                        if (first_lap)
                                element->flags = SBAL_FLAGS_FIRST_FRAG;
                        else
                                element->flags = SBAL_FLAGS_MIDDLE_FRAG;
                        ctx->num_elements++;
                        element++;
                        data_len -= page_remainder;
                        ctx->offset += page_remainder;
                        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                        page_offset = 0;
                        element->addr = page + page_offset;
                } else {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                data_len, &hcsum);
                        element->length += data_len;
                        if (!first_lap)
                                element->flags = SBAL_FLAGS_LAST_FRAG;
                        ctx->num_elements++;
                        ctx->offset += data_len;
                        data_len = 0;
                }
                first_lap = 0;
        }
        ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
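
/*
 * SBAL flag pattern produced above (derived from the two branches): a
 * segment spanning n > 1 buffer elements is marked FIRST_FRAG,
 * MIDDLE_FRAG, ..., MIDDLE_FRAG, LAST_FRAG; a segment that fits into
 * a single element keeps flags == 0, since neither branch sets a flag
 * when first_lap is still 1 and the data fits the page.
 */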

static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        u32 phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt4");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
                                    eddp->thl + data_len, IPPROTO_TCP, 0);
        /* compute checksum of tcp header */
        return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
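
/*
 * Read together with qeth_eddp_create_segment_data_tcp() above, the
 * checksum written into each segment works out to
 *
 *      check = csum_fold(sum(payload chunks,
 *                        sum(tcp header,
 *                        pseudo-header sum)))
 *
 * i.e. pseudo header first, then the TCP header (here), then the
 * payload chunks, with the fold to 16 bits done last.
 */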

static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        u32 proto;
        u32 phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt6");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
                              sizeof(struct in6_addr), 0);
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
                              sizeof(struct in6_addr), phcsum);
        proto = htonl(IPPROTO_TCP);
        phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
        return phcsum;
}

static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
        struct qeth_eddp_data *eddp;

        QETH_DBF_TEXT(trace, 5, "eddpcrda");
        eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
        if (eddp) {
                memset(eddp, 0, sizeof(struct qeth_eddp_data));
                eddp->nhl = nhl;
                eddp->thl = thl;
                memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
                memcpy(&eddp->nh, nh, nhl);
                memcpy(&eddp->th, th, thl);
                eddp->frag = -1; /* initially we're in skb->data */
        }
        return eddp;
}

static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                             struct qeth_eddp_data *eddp)
{
        struct tcphdr *tcph;
        int data_len;
        u32 hcsum;

        QETH_DBF_TEXT(trace, 5, "eddpftcp");
        eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
        tcph = eddp->skb->h.th;
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
                /* prepare qdio hdr */
                if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                        eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
                                                     eddp->nhl + eddp->thl -
                                                     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
                        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                                eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
                } else
                        eddp->qh.hdr.l3.length = data_len + eddp->nhl +
                                                 eddp->thl;
                /* prepare ip hdr */
                if (eddp->skb->protocol == ETH_P_IP) {
                        eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
                                                 eddp->thl;
                        eddp->nh.ip4.h.check = 0;
                        eddp->nh.ip4.h.check =
                                ip_fast_csum((u8 *)&eddp->nh.ip4.h,
                                                eddp->nh.ip4.h.ihl);
                } else
                        eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
                /* prepare tcp hdr */
                if (data_len == (eddp->skb->len - eddp->skb_offset)) {
                        /* last segment -> set FIN and PSH flags */
                        eddp->th.tcp.h.fin = tcph->fin;
                        eddp->th.tcp.h.psh = tcph->psh;
                }
                if (eddp->skb->protocol == ETH_P_IP)
                        hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
                else
                        hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
                /* fill the next segment into the context */
                qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
                qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
                if (eddp->skb_offset >= eddp->skb->len)
                        break;
                /* prepare headers for next round */
                if (eddp->skb->protocol == ETH_P_IP)
                        eddp->nh.ip4.h.id++;
                eddp->th.tcp.h.seq += data_len;
        }
}
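
/*
 * Per-segment loop in brief: each round emits one MSS-sized (tso_size)
 * TCP segment with freshly recomputed lengths and checksums, then
 * advances the template headers for the next round: the IPv4 ID is
 * incremented and the TCP sequence number moved forward by data_len,
 * which is exactly what software segmentation of a TSO skb requires.
 */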

static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                           struct sk_buff *skb, struct qeth_hdr *qhdr)
{
        struct qeth_eddp_data *eddp = NULL;

        QETH_DBF_TEXT(trace, 5, "eddpficx");
        /* create our segmentation headers and copy original headers */
        if (skb->protocol == ETH_P_IP)
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
                                skb->nh.iph->ihl*4,
                                (u8 *)skb->h.th, skb->h.th->doff*4);
        else
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
                                sizeof(struct ipv6hdr),
                                (u8 *)skb->h.th, skb->h.th->doff*4);

        if (eddp == NULL) {
                QETH_DBF_TEXT(trace, 2, "eddpfcnm");
                return -ENOMEM;
        }
        if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                        eddp->vlan[0] = __constant_htons(skb->protocol);
                        eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
                }
#endif /* CONFIG_QETH_VLAN */
        }
        /* the next flags will only be set on the last segment */
        eddp->th.tcp.h.fin = 0;
        eddp->th.tcp.h.psh = 0;
        eddp->skb = skb;
        /* begin segmentation and fill context */
        __qeth_eddp_fill_context_tcp(ctx, eddp);
        kfree(eddp);
        return 0;
}

static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
                         int hdr_len)
{
        int skbs_per_page;

        QETH_DBF_TEXT(trace, 5, "eddpcanp");
        /* can we put multiple skbs in one page? */
        skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
        if (skbs_per_page > 1) {
                ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
                                 skbs_per_page + 1;
                ctx->elements_per_skb = 1;
        } else {
                /* no -> how many elements per skb? */
                ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
                                     PAGE_SIZE) >> PAGE_SHIFT;
                ctx->num_pages = ctx->elements_per_skb *
                                 (skb_shinfo(skb)->tso_segs + 1);
        }
        ctx->num_elements = ctx->elements_per_skb *
                            (skb_shinfo(skb)->tso_segs + 1);
}
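
/*
 * Worked example (hypothetical values, 4 KB pages assumed): with
 * tso_size = 1400, hdr_len = 94 and tso_segs = 10,
 * skbs_per_page = 4096 / 1494 = 2, so elements_per_skb = 1,
 * num_pages = 11 / 2 + 1 = 6 and num_elements = 1 * 11 = 11.
 * The "+ 1" terms overprovision by one segment/page so a partial
 * trailing segment still fits.
 */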

static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
                                 int hdr_len)
{
        struct qeth_eddp_context *ctx = NULL;
        u8 *addr;
        int i;

        QETH_DBF_TEXT(trace, 5, "creddpcg");
        /* create the context and allocate pages */
        ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn1");
                return NULL;
        }
        memset(ctx, 0, sizeof(struct qeth_eddp_context));
        ctx->type = QETH_LARGE_SEND_EDDP;
        qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
        if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_TEXT(trace, 2, "ceddpcis");
                kfree(ctx);
                return NULL;
        }
        ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
        if (ctx->pages == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn2");
                kfree(ctx);
                return NULL;
        }
        memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
        for (i = 0; i < ctx->num_pages; ++i) {
                addr = (u8 *)__get_free_page(GFP_ATOMIC);
                if (addr == NULL) {
                        QETH_DBF_TEXT(trace, 2, "ceddpcn3");
                        /* free only the pages allocated so far */
                        ctx->num_pages = i;
                        qeth_eddp_free_context(ctx);
                        return NULL;
                }
                memset(addr, 0, PAGE_SIZE);
                ctx->pages[i] = addr;
        }
        ctx->elements = kmalloc(ctx->num_elements *
                                sizeof(struct qeth_eddp_element), GFP_ATOMIC);
        if (ctx->elements == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn4");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        memset(ctx->elements, 0,
               ctx->num_elements * sizeof(struct qeth_eddp_element));
        /* reset num_elements; will be incremented again in fill_buffer to
         * reflect number of actually used elements */
        ctx->num_elements = 0;
        return ctx;
}
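
/*
 * Error-path note (behaviour recovered from the code above): on a
 * partial page-allocation failure, num_pages is clamped to the number
 * of pages actually allocated before calling qeth_eddp_free_context(),
 * so its free loop never touches unallocated slots; passing the
 * possibly-NULL ctx->elements to kfree() is safe because kfree(NULL)
 * is a no-op, the very idiom this "kfree cleanup" patch series relies
 * on.
 */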

static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
                             struct qeth_hdr *qhdr)
{
        struct qeth_eddp_context *ctx = NULL;

        QETH_DBF_TEXT(trace, 5, "creddpct");
        if (skb->protocol == ETH_P_IP)
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
                        skb->h.th->doff*4);
        else if (skb->protocol == ETH_P_IPV6)
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
                        skb->h.th->doff*4);
        else
                QETH_DBF_TEXT(trace, 2, "cetcpinv");

        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "creddpnl");
                return NULL;
        }
        if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
                QETH_DBF_TEXT(trace, 2, "ceddptfe");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        atomic_set(&ctx->refcnt, 1);
        return ctx;
}

struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
                         struct qeth_hdr *qhdr)
{
        QETH_DBF_TEXT(trace, 5, "creddpc");
        switch (skb->sk->sk_protocol) {
        case IPPROTO_TCP:
                return qeth_eddp_create_context_tcp(card, skb, qhdr);
        default:
                QETH_DBF_TEXT(trace, 2, "eddpinvp");
        }
        return NULL;
}
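
/*
 * End-to-end usage sketch (assumed caller in qeth_main.c, shown only
 * to tie the exported functions together; names outside this file are
 * assumptions):
 *
 *      ctx = qeth_eddp_create_context(card, skb, hdr);
 *      if (ctx == NULL)
 *              fall back to a non-EDDP send;
 *      if (qeth_eddp_check_buffers_for_context(queue, ctx) < 0)
 *              back off (-EBUSY) and retry later;
 *      flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
 *      qeth_eddp_put_context(ctx);     (drop the creator's reference)
 *
 * The buffers keep their own references (taken in
 * qeth_eddp_buf_ref_context()) until qeth_eddp_buf_release_contexts()
 * runs on TX completion.
 */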