/*
 * net/ieee802154/6lowpan.c
 * (patch context: "6lowpan: fix fragmentation on sending side")
 */
1 /*
2  * Copyright 2011, Siemens AG
3  * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
4  */
5
6 /*
7  * Based on patches from Jon Smirl <jonsmirl@gmail.com>
8  * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22  */
23
24 /* Jon's code is based on 6lowpan implementation for Contiki which is:
25  * Copyright (c) 2008, Swedish Institute of Computer Science.
26  * All rights reserved.
27  *
28  * Redistribution and use in source and binary forms, with or without
29  * modification, are permitted provided that the following conditions
30  * are met:
31  * 1. Redistributions of source code must retain the above copyright
32  *    notice, this list of conditions and the following disclaimer.
33  * 2. Redistributions in binary form must reproduce the above copyright
34  *    notice, this list of conditions and the following disclaimer in the
35  *    documentation and/or other materials provided with the distribution.
36  * 3. Neither the name of the Institute nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52
53 #include <linux/bitops.h>
54 #include <linux/if_arp.h>
55 #include <linux/module.h>
56 #include <linux/moduleparam.h>
57 #include <linux/netdevice.h>
58 #include <net/af_ieee802154.h>
59 #include <net/ieee802154.h>
60 #include <net/ieee802154_netdev.h>
61 #include <net/ipv6.h>
62
63 #include "6lowpan.h"
64
/* all registered 6lowpan interfaces (struct lowpan_dev_record entries) */
static LIST_HEAD(lowpan_devices);

/* private device info */
struct lowpan_dev_info {
	struct net_device	*real_dev; /* real WPAN device ptr */
	struct mutex		dev_list_mtx; /* mutex for list ops */
	unsigned short		fragment_tag; /* next datagram tag for TX fragmentation */
};

/* links one virtual lowpan net_device into lowpan_devices */
struct lowpan_dev_record {
	struct net_device *ldev;	/* the lowpan interface */
	struct list_head list;		/* entry in lowpan_devices */
};

/* state of one in-progress datagram reassembly (RX side) */
struct lowpan_fragment {
	struct sk_buff		*skb;		/* skb to be assembled */
	u16			length;		/* length to be assembled */
	u32			bytes_rcv;	/* bytes received */
	u16			tag;		/* current fragment tag */
	struct timer_list	timer;		/* assembling timer */
	struct list_head	list;		/* fragments list */
};

static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);	/* protects lowpan_fragments */
90
/* Fetch the 6lowpan private area embedded in @dev. */
static inline struct lowpan_dev_info *
lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}
96
97 static inline void lowpan_address_flip(u8 *src, u8 *dest)
98 {
99         int i;
100         for (i = 0; i < IEEE802154_ADDR_LEN; i++)
101                 (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
102 }
103
/* header_ops->create for the lowpan interface.
 *
 * Compresses the IPv6 header in place (lowpan_header_compress()), fills
 * in the 802.15.4 MAC control block, builds source/destination WPAN
 * addresses and finally asks the real WPAN device to prepend its MAC
 * header via dev_hard_header().
 *
 * Returns the real device's dev_hard_header() result, or 0 for
 * non-IPv6 traffic (which is left untouched).
 */
static int lowpan_header_create(struct sk_buff *skb,
			   struct net_device *dev,
			   unsigned short type, const void *_daddr,
			   const void *_saddr, unsigned int len)
{
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	struct ieee802154_addr sa, da;

	/* TODO:
	 * if this package isn't ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	/* no source given: use our own hardware address */
	if (!saddr)
		saddr = dev->dev_addr;

	/* 8 == IEEE802154_ADDR_LEN, long (EUI-64) addresses assumed here */
	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	lowpan_header_compress(skb, dev, type, daddr, saddr, len);

	/*
	 * NOTE1: I'm still unsure about the fact that compression and WPAN
	 * header are created here and not later in the xmit. So wait for
	 * an opinion of net maintainers.
	 */
	/*
	 * NOTE2: to be absolutely correct, we must derive PANid information
	 * from MAC subif of the 'dev' and 'real_dev' network devices, but
	 * this isn't implemented in mainline yet, so currently we assign 0xff
	 */
	mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
	mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);

	/* prepare wpan address data */
	sa.addr_type = IEEE802154_ADDR_LONG;
	sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	memcpy(&(sa.hwaddr), saddr, 8);
	/* intra-PAN communications */
	da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	/*
	 * if the destination address is the broadcast address, use the
	 * corresponding short address
	 */
	if (lowpan_is_addr_broadcast(daddr)) {
		da.addr_type = IEEE802154_ADDR_SHORT;
		da.short_addr = IEEE802154_ADDR_BROADCAST;
	} else {
		da.addr_type = IEEE802154_ADDR_LONG;
		memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);

		/* request acknowledgment */
		mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
	}

	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
			type, (void *)&da, (void *)&sa, skb->len);
}
166
/* Deliver a copy of @skb to every lowpan interface stacked on the real
 * WPAN device the skb arrived on (skb->dev).
 *
 * Walks lowpan_devices under RCU; each matching interface receives its
 * own skb_copy() via netif_rx().  @skb itself is NOT consumed — the
 * caller still owns it.  Returns the last netif_rx() status, or -ENOMEM
 * if a copy allocation failed (remaining interfaces are then skipped).
 */
static int lowpan_give_skb_to_devices(struct sk_buff *skb,
					struct net_device *dev)
{
	struct lowpan_dev_record *entry;
	struct sk_buff *skb_cp;
	int stat = NET_RX_SUCCESS;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &lowpan_devices, list)
		if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
			skb_cp = skb_copy(skb, GFP_ATOMIC);
			if (!skb_cp) {
				stat = -ENOMEM;
				break;
			}

			skb_cp->dev = entry->ldev;
			stat = netif_rx(skb_cp);
		}
	rcu_read_unlock();

	return stat;
}
190
/* Reassembly timeout handler: discard an incomplete datagram and free
 * its state.  Fires LOWPAN_FRAG_TIMEOUT (60 s) after the first fragment
 * arrived, unless process_data() completed the frame and deleted the
 * timer first.
 *
 * NOTE(review): list_del() here runs without flist_lock, so it can race
 * with the list traversal in process_data() — confirm.  Taking
 * flist_lock here naively would deadlock, because process_data() and
 * module cleanup call del_timer_sync() while holding that lock.
 */
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("timer expired for frame with tag %d\n", entry->tag);

	list_del(&entry->list);
	dev_kfree_skb(entry->skb);
	kfree(entry);
}
201
/* Allocate and register reassembly state for a new fragmented datagram.
 *
 * The target skb is sized for the whole datagram (@len) plus headroom
 * reserved for the later uncompressed IPv6 header, and pre-grown with
 * skb_put() so fragments can be copied in at their byte offsets.  A
 * LOWPAN_FRAG_TIMEOUT expiry timer (lowpan_fragment_timer_expired) is
 * armed to discard the frame if it never completes, and the entry is
 * appended to lowpan_fragments.
 *
 * Called from process_data() with flist_lock held (hence GFP_ATOMIC).
 * Returns the new entry, or NULL on allocation failure.
 */
static struct lowpan_fragment *
lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
{
	struct lowpan_fragment *frame;

	frame = kzalloc(sizeof(struct lowpan_fragment),
			GFP_ATOMIC);
	if (!frame)
		goto frame_err;

	INIT_LIST_HEAD(&frame->list);

	frame->length = len;
	frame->tag = tag;

	/* allocate buffer for frame assembling */
	frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
					       sizeof(struct ipv6hdr));

	if (!frame->skb)
		goto skb_err;

	frame->skb->priority = skb->priority;

	/* reserve headroom for uncompressed ipv6 header */
	skb_reserve(frame->skb, sizeof(struct ipv6hdr));
	skb_put(frame->skb, frame->length);

	/* copy the first control block to keep a
	 * trace of the link-layer addresses in case
	 * of a link-local compressed address
	 */
	memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));

	init_timer(&frame->timer);
	/* time out is the same as for ipv6 - 60 sec */
	frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
	frame->timer.data = (unsigned long)frame;
	frame->timer.function = lowpan_fragment_timer_expired;

	add_timer(&frame->timer);

	list_add_tail(&frame->list, &lowpan_fragments);

	return frame;

skb_err:
	kfree(frame);
frame_err:
	return NULL;
}
253
254 static int process_data(struct sk_buff *skb)
255 {
256         u8 iphc0, iphc1;
257         const struct ieee802154_addr *_saddr, *_daddr;
258
259         raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
260         /* at least two bytes will be used for the encoding */
261         if (skb->len < 2)
262                 goto drop;
263
264         if (lowpan_fetch_skb_u8(skb, &iphc0))
265                 goto drop;
266
267         /* fragments assembling */
268         switch (iphc0 & LOWPAN_DISPATCH_MASK) {
269         case LOWPAN_DISPATCH_FRAG1:
270         case LOWPAN_DISPATCH_FRAGN:
271         {
272                 struct lowpan_fragment *frame;
273                 /* slen stores the rightmost 8 bits of the 11 bits length */
274                 u8 slen, offset = 0;
275                 u16 len, tag;
276                 bool found = false;
277
278                 if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
279                     lowpan_fetch_skb_u16(skb, &tag))  /* fragment tag */
280                         goto drop;
281
282                 /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
283                 len = ((iphc0 & 7) << 8) | slen;
284
285                 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
286                         pr_debug("%s received a FRAG1 packet (tag: %d, "
287                                  "size of the entire IP packet: %d)",
288                                  __func__, tag, len);
289                 } else { /* FRAGN */
290                         if (lowpan_fetch_skb_u8(skb, &offset))
291                                 goto unlock_and_drop;
292                         pr_debug("%s received a FRAGN packet (tag: %d, "
293                                  "size of the entire IP packet: %d, "
294                                  "offset: %d)", __func__, tag, len, offset * 8);
295                 }
296
297                 /*
298                  * check if frame assembling with the same tag is
299                  * already in progress
300                  */
301                 spin_lock_bh(&flist_lock);
302
303                 list_for_each_entry(frame, &lowpan_fragments, list)
304                         if (frame->tag == tag) {
305                                 found = true;
306                                 break;
307                         }
308
309                 /* alloc new frame structure */
310                 if (!found) {
311                         pr_debug("%s first fragment received for tag %d, "
312                                  "begin packet reassembly", __func__, tag);
313                         frame = lowpan_alloc_new_frame(skb, len, tag);
314                         if (!frame)
315                                 goto unlock_and_drop;
316                 }
317
318                 /* if payload fits buffer, copy it */
319                 if (likely((offset * 8 + skb->len) <= frame->length))
320                         skb_copy_to_linear_data_offset(frame->skb, offset * 8,
321                                                         skb->data, skb->len);
322                 else
323                         goto unlock_and_drop;
324
325                 frame->bytes_rcv += skb->len;
326
327                 /* frame assembling complete */
328                 if ((frame->bytes_rcv == frame->length) &&
329                      frame->timer.expires > jiffies) {
330                         /* if timer haven't expired - first of all delete it */
331                         del_timer_sync(&frame->timer);
332                         list_del(&frame->list);
333                         spin_unlock_bh(&flist_lock);
334
335                         pr_debug("%s successfully reassembled fragment "
336                                  "(tag %d)", __func__, tag);
337
338                         dev_kfree_skb(skb);
339                         skb = frame->skb;
340                         kfree(frame);
341
342                         if (lowpan_fetch_skb_u8(skb, &iphc0))
343                                 goto drop;
344
345                         break;
346                 }
347                 spin_unlock_bh(&flist_lock);
348
349                 return kfree_skb(skb), 0;
350         }
351         default:
352                 break;
353         }
354
355         if (lowpan_fetch_skb_u8(skb, &iphc1))
356                 goto drop;
357
358         _saddr = &mac_cb(skb)->sa;
359         _daddr = &mac_cb(skb)->da;
360
361         return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
362                                 _saddr->addr_type, IEEE802154_ADDR_LEN,
363                                 (u8 *)_daddr->hwaddr, _daddr->addr_type,
364                                 IEEE802154_ADDR_LEN, iphc0, iphc1,
365                                 lowpan_give_skb_to_devices);
366
367 unlock_and_drop:
368         spin_unlock_bh(&flist_lock);
369 drop:
370         kfree_skb(skb);
371         return -EINVAL;
372 }
373
374 static int lowpan_set_address(struct net_device *dev, void *p)
375 {
376         struct sockaddr *sa = p;
377
378         if (netif_running(dev))
379                 return -EBUSY;
380
381         /* TODO: validate addr */
382         memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
383
384         return 0;
385 }
386
/* Build and transmit a single fragment sub-frame.
 *
 * The fragment skb is assembled as: the MAC header copied from @skb
 * (@mlen bytes), the 4- or 5-byte 6lowpan fragment header from @head
 * (size chosen by @type: FRAG1 vs FRAGN), then @plen payload bytes taken
 * at @offset from @skb's network header.  IEEE802154_MFR_SIZE extra
 * bytes are allocated so the driver can append the FCS.
 *
 * Returns dev_queue_xmit()'s status or -ENOMEM.
 */
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
			int mlen, int plen, int offset, int type)
{
	struct sk_buff *frag;
	int hlen;

	hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
			LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;

	raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);

	frag = netdev_alloc_skb(skb->dev,
				hlen + mlen + plen + IEEE802154_MFR_SIZE);
	if (!frag)
		return -ENOMEM;

	frag->priority = skb->priority;

	/* copy header, MFR and payload */
	skb_put(frag, mlen);
	skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);

	skb_put(frag, hlen);
	skb_copy_to_linear_data_offset(frag, mlen, head, hlen);

	skb_put(frag, plen);
	skb_copy_to_linear_data_offset(frag, mlen + hlen,
				       skb_network_header(skb) + offset, plen);

	raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}
421
/* Split a frame too large for one 802.15.4 MTU into FRAG1 + FRAGN
 * sub-frames (RFC 4944 fragmentation).
 *
 * The FRAG1 header advertises the *uncompressed* datagram size
 * (dgram_size, from lowpan_uncompress_size()) and a per-device tag;
 * FRAG1 carries the compressed header (lowpan_size) plus an
 * 8-byte-aligned first payload chunk.  Each FRAGN adds the current
 * datagram offset in 8-byte units (head[4]).  Note that @offset tracks
 * position in the compressed skb payload while dgram_offset tracks the
 * uncompressed datagram — they advance by the same payload amount but
 * start differently.
 *
 * Each sub-frame is an independent skb; the caller still owns and frees
 * the original @skb.  Returns 0, or the first failed transmit's errno.
 */
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	u16 dgram_offset, dgram_size, payload_length, header_length,
	    lowpan_size, frag_plen, offset, tag;
	u8 head[5];

	header_length = skb->mac_len;
	payload_length = skb->len - header_length;
	tag = lowpan_dev_info(dev)->fragment_tag++;
	lowpan_size = skb_network_header_len(skb);
	dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
		     header_length;

	/* first fragment header */
	head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7);
	head[1] = dgram_size & 0xff;
	head[2] = tag >> 8;
	head[3] = tag & 0xff;

	/* calc the nearest payload length(divided to 8) for first fragment
	 * which fits into a IEEE802154_MTU
	 */
	frag_plen = round_down(IEEE802154_MTU - header_length -
			       LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
			       IEEE802154_MFR_SIZE, 8);

	err = lowpan_fragment_xmit(skb, head, header_length,
				   frag_plen + lowpan_size, 0,
				   LOWPAN_DISPATCH_FRAG1);
	if (err) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, tag);
		goto exit;
	}

	offset = lowpan_size + frag_plen;
	dgram_offset += frag_plen;

	/* next fragment header */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	frag_plen = round_down(IEEE802154_MTU - header_length -
			       LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8);

	while (payload_length - offset > 0) {
		int len = frag_plen;

		/* offset field is expressed in units of 8 bytes */
		head[4] = dgram_offset >> 3;

		if (payload_length - offset < len)
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length, len,
					   offset, LOWPAN_DISPATCH_FRAGN);
		if (err) {
			pr_debug("%s unable to send a subsequent FRAGN packet "
				 "(tag: %d, offset: %d", __func__, tag, offset);
			goto exit;
		}

		offset += len;
		dgram_offset += len;
	}

exit:
	return err;
}
492
/* ndo_start_xmit for the lowpan interface.
 *
 * The frame was already compressed and WPAN-framed in
 * lowpan_header_create(); here it is redirected to the real WPAN device,
 * fragmented first if it would not fit the 802.15.4 MTU.
 */
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("package xmit\n");

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("ERROR: no real wpan device found\n");
		goto error;
	}

	/* Send directly if less than the MTU minus the 2 checksum bytes. */
	if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
		err = dev_queue_xmit(skb);	/* consumes skb */
		goto out;
	}

	pr_debug("frame is too big, fragmentation is needed\n");
	err = lowpan_skb_fragmentation(skb, dev);
error:
	/* fragments were sent as separate skbs; drop the original here */
	dev_kfree_skb(skb);
out:
	if (err)
		pr_debug("ERROR: xmit failed\n");

	/* negative errno maps to DROP; dev_queue_xmit codes pass through */
	return (err < 0) ? NET_XMIT_DROP : err;
}
521
522 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
523 {
524         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
525         return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
526 }
527
528 static u16 lowpan_get_pan_id(const struct net_device *dev)
529 {
530         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
531         return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
532 }
533
534 static u16 lowpan_get_short_addr(const struct net_device *dev)
535 {
536         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
537         return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
538 }
539
540 static u8 lowpan_get_dsn(const struct net_device *dev)
541 {
542         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
543         return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
544 }
545
546 static struct header_ops lowpan_header_ops = {
547         .create = lowpan_header_create,
548 };
549
/* dedicated lockdep classes: the lowpan device transmits through the
 * real WPAN device, so its queue locks must not share a class with it
 */
static struct lock_class_key lowpan_tx_busylock;
static struct lock_class_key lowpan_netdev_xmit_lock_key;

/* netdev_for_each_tx_queue() callback: reclassify one queue's xmit lock */
static void lowpan_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &lowpan_netdev_xmit_lock_key);
}
560
561
/* ndo_init: assign our private lockdep classes to every TX queue and to
 * the qdisc busylock, avoiding false recursion reports when this device
 * transmits through the real WPAN device.  Always succeeds.
 */
static int lowpan_dev_init(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &lowpan_tx_busylock;
	return 0;
}
568
/* netdev callbacks for the virtual lowpan interface */
static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_init		= lowpan_dev_init,
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};
574
/* MLME ops installed in dev->ml_priv; all forward to the real device */
static struct ieee802154_mlme_ops lowpan_mlme = {
	.get_pan_id = lowpan_get_pan_id,
	.get_phy = lowpan_get_phy,
	.get_short_addr = lowpan_get_short_addr,
	.get_dsn = lowpan_get_dsn,
};
581
/* rtnl_link_ops->setup: initialize the virtual lowpan net_device. */
static void lowpan_setup(struct net_device *dev)
{
	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	/* NOTE(review): presumably IPv6 minimum MTU (1280) + 1 — confirm */
	dev->mtu		= 1281;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->ml_priv		= &lowpan_mlme;
	dev->destructor		= free_netdev;	/* free priv on unregister */
}
600
601 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
602 {
603         if (tb[IFLA_ADDRESS]) {
604                 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
605                         return -EINVAL;
606         }
607         return 0;
608 }
609
/* packet_type handler for ETH_P_IEEE802154 frames.
 *
 * Dispatches on the first 6lowpan byte: uncompressed IPv6 datagrams
 * (LOWPAN_DISPATCH_IPV6) are realigned, unwrapped and delivered to all
 * stacked lowpan interfaces; IPHC/FRAG1/FRAGN payloads go through
 * process_data() on a clone.  Always consumes @skb.
 */
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	struct sk_buff *local_skb;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(local_skb, 1);

		/* delivers further copies; local_skb stays ours */
		lowpan_give_skb_to_devices(local_skb, NULL);

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;
			/* process_data() consumes the clone on all paths */
			process_data(local_skb);

			kfree_skb(skb);
			break;
		default:
			/* unknown dispatch: silently ignored (skb leaks to
			 * the success path below, which frees nothing here)
			 */
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
664
665 static int lowpan_newlink(struct net *src_net, struct net_device *dev,
666                           struct nlattr *tb[], struct nlattr *data[])
667 {
668         struct net_device *real_dev;
669         struct lowpan_dev_record *entry;
670
671         pr_debug("adding new link\n");
672
673         if (!tb[IFLA_LINK])
674                 return -EINVAL;
675         /* find and hold real wpan device */
676         real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
677         if (!real_dev)
678                 return -ENODEV;
679         if (real_dev->type != ARPHRD_IEEE802154) {
680                 dev_put(real_dev);
681                 return -EINVAL;
682         }
683
684         lowpan_dev_info(dev)->real_dev = real_dev;
685         lowpan_dev_info(dev)->fragment_tag = 0;
686         mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
687
688         entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
689         if (!entry) {
690                 dev_put(real_dev);
691                 lowpan_dev_info(dev)->real_dev = NULL;
692                 return -ENOMEM;
693         }
694
695         entry->ldev = dev;
696
697         /* Set the lowpan harware address to the wpan hardware address. */
698         memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
699
700         mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
701         INIT_LIST_HEAD(&entry->list);
702         list_add_tail(&entry->list, &lowpan_devices);
703         mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
704
705         register_netdevice(dev);
706
707         return 0;
708 }
709
/* rtnl_link_ops->dellink: tear down a lowpan interface.
 *
 * Removes every lowpan_devices record pointing at @dev, queues the
 * netdevice for unregistration on @head and drops the reference taken
 * on the real WPAN device in lowpan_newlink().  Runs under RTNL.
 */
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry, *tmp;

	ASSERT_RTNL();

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}
733
/* rtnetlink glue: "ip link add ... type lowpan" */
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};
742
/* register the "lowpan" rtnl link type; returns rtnl_link_register()'s
 * status
 */
static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}
747
/* unregister the "lowpan" rtnl link type (also deletes its devices) */
static inline void lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}
752
/* Netdev notifier: when a real WPAN device unregisters, tear down every
 * lowpan interface stacked on top of it.  The notifier core calls this
 * under RTNL, which lowpan_dellink() asserts.
 */
static int lowpan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(del_list);
	struct lowpan_dev_record *entry, *tmp;

	if (dev->type != ARPHRD_IEEE802154)
		goto out;

	if (event == NETDEV_UNREGISTER) {
		list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
			if (lowpan_dev_info(entry->ldev)->real_dev == dev)
				/* queues the lowpan dev on del_list */
				lowpan_dellink(entry->ldev, &del_list);
		}

		unregister_netdevice_many(&del_list);
	}

out:
	return NOTIFY_DONE;
}
775
/* watch for WPAN device unregistration */
static struct notifier_block lowpan_dev_notifier = {
	.notifier_call = lowpan_device_event,
};

/* receive every ETH_P_IEEE802154 frame from the WPAN layer */
static struct packet_type lowpan_packet_type = {
	.type = __constant_htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};
784
/* Module init: register the rtnl link type, the 802.15.4 packet handler
 * and the netdev notifier, unwinding in reverse order on failure.
 */
static int __init lowpan_init_module(void)
{
	int err = 0;

	err = lowpan_netlink_init();
	if (err < 0)
		goto out;

	dev_add_pack(&lowpan_packet_type);

	err = register_netdevice_notifier(&lowpan_dev_notifier);
	if (err < 0) {
		dev_remove_pack(&lowpan_packet_type);
		lowpan_netlink_fini();
	}
out:
	return err;
}
803
/* Module exit: unregister everything, then reap reassembly state left
 * over after RX has stopped.
 *
 * NOTE(review): del_timer_sync() is called while holding flist_lock
 * with BHs disabled; this is only safe because the timer handler
 * (lowpan_fragment_timer_expired) does not itself take flist_lock —
 * keep these two in sync.
 */
static void __exit lowpan_cleanup_module(void)
{
	struct lowpan_fragment *frame, *tframe;

	lowpan_netlink_fini();

	dev_remove_pack(&lowpan_packet_type);

	unregister_netdevice_notifier(&lowpan_dev_notifier);

	/* Now 6lowpan packet_type is removed, so no new fragments are
	 * expected on RX, therefore that's the time to clean incomplete
	 * fragments.
	 */
	spin_lock_bh(&flist_lock);
	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
		del_timer_sync(&frame->timer);
		list_del(&frame->list);
		dev_kfree_skb(frame->skb);
		kfree(frame);
	}
	spin_unlock_bh(&flist_lock);
}
827
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
/* allows "ip link add ... type lowpan" to autoload this module */
MODULE_ALIAS_RTNL_LINK("lowpan");