/*
 * net/ieee802154/6lowpan_rtnl.c
 * Snapshot via git.karo-electronics.de (karo-tx-linux.git),
 * at commit: "6lowpan: change tag type to __be16".
 */
1 /*
2  * Copyright 2011, Siemens AG
3  * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
4  */
5
6 /*
7  * Based on patches from Jon Smirl <jonsmirl@gmail.com>
8  * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22  */
23
24 /* Jon's code is based on 6lowpan implementation for Contiki which is:
25  * Copyright (c) 2008, Swedish Institute of Computer Science.
26  * All rights reserved.
27  *
28  * Redistribution and use in source and binary forms, with or without
29  * modification, are permitted provided that the following conditions
30  * are met:
31  * 1. Redistributions of source code must retain the above copyright
32  *    notice, this list of conditions and the following disclaimer.
33  * 2. Redistributions in binary form must reproduce the above copyright
34  *    notice, this list of conditions and the following disclaimer in the
35  *    documentation and/or other materials provided with the distribution.
36  * 3. Neither the name of the Institute nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52
53 #include <linux/bitops.h>
54 #include <linux/if_arp.h>
55 #include <linux/module.h>
56 #include <linux/moduleparam.h>
57 #include <linux/netdevice.h>
58 #include <net/af_ieee802154.h>
59 #include <net/ieee802154.h>
60 #include <net/ieee802154_netdev.h>
61 #include <net/ipv6.h>
62
63 #include "6lowpan.h"
64
/* all 6lowpan interfaces, chained through lowpan_dev_record.list */
static LIST_HEAD(lowpan_devices);
66
/* private device info, stored in netdev_priv() of the 6lowpan device */
struct lowpan_dev_info {
        struct net_device       *real_dev; /* real WPAN device ptr */
        struct mutex            dev_list_mtx; /* mutex for list ops */
        /* tag for the next outgoing fragmented datagram.
         * NOTE(review): declared __be16 but incremented and byte-shifted
         * as a host-order integer in lowpan_skb_fragmentation() — sparse
         * will warn; confirm the intended on-air byte order.
         */
        __be16                  fragment_tag;
};
73
/* one entry per 6lowpan interface in the global lowpan_devices list */
struct lowpan_dev_record {
        struct net_device *ldev;        /* the 6lowpan net_device */
        struct list_head list;          /* entry in lowpan_devices */
};
78
/* per-datagram reassembly context, linked into lowpan_fragments
 * (protected by flist_lock) until complete or timed out
 */
struct lowpan_fragment {
        struct sk_buff          *skb;           /* skb to be assembled */
        u16                     length;         /* length to be assembled */
        u32                     bytes_rcv;      /* bytes received */
        u16                     tag;            /* current fragment tag */
        struct timer_list       timer;          /* assembling timer */
        struct list_head        list;           /* fragments list */
};
87
/* in-progress reassembly contexts; traversals hold flist_lock */
static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);
90
/* Map a 6lowpan net_device to its private lowpan_dev_info state. */
static inline struct lowpan_dev_info *
lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}
96
97 static inline void lowpan_address_flip(u8 *src, u8 *dest)
98 {
99         int i;
100         for (i = 0; i < IEEE802154_ADDR_LEN; i++)
101                 (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
102 }
103
/* header_ops->create for the 6lowpan device: compress the IPv6 header
 * into @skb, then build the IEEE 802.15.4 MAC header by calling
 * dev_hard_header() on the underlying real WPAN device.
 *
 * Returns 0 for non-IPv6 frames (see the TODO), otherwise the result of
 * dev_hard_header().
 */
static int lowpan_header_create(struct sk_buff *skb,
                           struct net_device *dev,
                           unsigned short type, const void *_daddr,
                           const void *_saddr, unsigned int len)
{
        const u8 *saddr = _saddr;
        const u8 *daddr = _daddr;
        struct ieee802154_addr sa, da;

        /* TODO:
         * if this package isn't ipv6 one, where should it be routed?
         */
        if (type != ETH_P_IPV6)
                return 0;

        if (!saddr)
                saddr = dev->dev_addr;

        raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
        raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

        lowpan_header_compress(skb, dev, type, daddr, saddr, len);

        /*
         * NOTE1: I'm still unsure about the fact that compression and WPAN
         * header are created here and not later in the xmit. So wait for
         * an opinion of net maintainers.
         */
        /*
         * NOTE2: to be absolutely correct, we must derive PANid information
         * from MAC subif of the 'dev' and 'real_dev' network devices, but
         * this isn't implemented in mainline yet, so currently we assign 0xff
         */
        mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
        mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);

        /* prepare wpan address data */
        sa.addr_type = IEEE802154_ADDR_LONG;
        sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

        memcpy(&(sa.hwaddr), saddr, 8);
        /* intra-PAN communications */
        da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

        /*
         * if the destination address is the broadcast address, use the
         * corresponding short address
         */
        if (lowpan_is_addr_broadcast(daddr)) {
                da.addr_type = IEEE802154_ADDR_SHORT;
                da.short_addr = IEEE802154_ADDR_BROADCAST;
        } else {
                da.addr_type = IEEE802154_ADDR_LONG;
                memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);

                /* request acknowledgment */
                mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
        }

        return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
                        type, (void *)&da, (void *)&sa, skb->len);
}
166
/* Deliver a copy of @skb to every 6lowpan interface stacked on the WPAN
 * device the skb arrived on (skb->dev); the caller keeps ownership of
 * @skb itself. @dev is unused.
 *
 * Returns the last netif_rx() status, or -ENOMEM if a copy failed.
 * NOTE(review): the return value mixes NET_RX_* codes with a negative
 * errno — confirm callers cope (currently they largely ignore it).
 */
static int lowpan_give_skb_to_devices(struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct lowpan_dev_record *entry;
        struct sk_buff *skb_cp;
        int stat = NET_RX_SUCCESS;

        rcu_read_lock();
        list_for_each_entry_rcu(entry, &lowpan_devices, list)
                if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
                        skb_cp = skb_copy(skb, GFP_ATOMIC);
                        if (!skb_cp) {
                                stat = -ENOMEM;
                                break;
                        }

                        skb_cp->dev = entry->ldev;
                        stat = netif_rx(skb_cp);
                }
        rcu_read_unlock();

        return stat;
}
190
/* Reassembly timeout handler: drop an incomplete datagram and free its
 * reassembly context.
 *
 * NOTE(review): this unlinks the entry from lowpan_fragments without
 * taking flist_lock, while all other traversals hold it. Taking the lock
 * here would deadlock with del_timer_sync() being called under flist_lock
 * in process_data()/lowpan_cleanup_module(), so the synchronization
 * scheme needs a deeper rework — flagging rather than changing it.
 */
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
        struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

        pr_debug("timer expired for frame with tag %d\n", entry->tag);

        list_del(&entry->list);
        dev_kfree_skb(entry->skb);
        kfree(entry);
}
201
/* Allocate and initialise a reassembly context for a datagram of @len
 * bytes carrying fragment tag @tag, arm its expiry timer and link it
 * into lowpan_fragments.
 *
 * Called with flist_lock held (it modifies lowpan_fragments).
 * Returns the new context, or NULL on allocation failure.
 */
static struct lowpan_fragment *
lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
{
        struct lowpan_fragment *frame;

        frame = kzalloc(sizeof(struct lowpan_fragment),
                        GFP_ATOMIC);
        if (!frame)
                goto frame_err;

        INIT_LIST_HEAD(&frame->list);

        frame->length = len;
        frame->tag = tag;

        /* allocate buffer for frame assembling */
        frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
                                               sizeof(struct ipv6hdr));

        if (!frame->skb)
                goto skb_err;

        frame->skb->priority = skb->priority;

        /* reserve headroom for uncompressed ipv6 header */
        skb_reserve(frame->skb, sizeof(struct ipv6hdr));
        skb_put(frame->skb, frame->length);

        /* copy the first control block to keep a
         * trace of the link-layer addresses in case
         * of a link-local compressed address
         */
        memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));

        init_timer(&frame->timer);
        /* time out is the same as for ipv6 - 60 sec */
        frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
        frame->timer.data = (unsigned long)frame;
        frame->timer.function = lowpan_fragment_timer_expired;

        add_timer(&frame->timer);

        list_add_tail(&frame->list, &lowpan_fragments);

        return frame;

skb_err:
        kfree(frame);
frame_err:
        return NULL;
}
253
254 static int process_data(struct sk_buff *skb)
255 {
256         u8 iphc0, iphc1;
257         const struct ieee802154_addr *_saddr, *_daddr;
258
259         raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
260         /* at least two bytes will be used for the encoding */
261         if (skb->len < 2)
262                 goto drop;
263
264         if (lowpan_fetch_skb_u8(skb, &iphc0))
265                 goto drop;
266
267         /* fragments assembling */
268         switch (iphc0 & LOWPAN_DISPATCH_MASK) {
269         case LOWPAN_DISPATCH_FRAG1:
270         case LOWPAN_DISPATCH_FRAGN:
271         {
272                 struct lowpan_fragment *frame;
273                 /* slen stores the rightmost 8 bits of the 11 bits length */
274                 u8 slen, offset = 0;
275                 u16 len, tag;
276                 bool found = false;
277
278                 if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
279                     lowpan_fetch_skb_u16(skb, &tag))  /* fragment tag */
280                         goto drop;
281
282                 /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
283                 len = ((iphc0 & 7) << 8) | slen;
284
285                 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
286                         pr_debug("%s received a FRAG1 packet (tag: %d, "
287                                  "size of the entire IP packet: %d)",
288                                  __func__, tag, len);
289                 } else { /* FRAGN */
290                         if (lowpan_fetch_skb_u8(skb, &offset))
291                                 goto unlock_and_drop;
292                         pr_debug("%s received a FRAGN packet (tag: %d, "
293                                  "size of the entire IP packet: %d, "
294                                  "offset: %d)", __func__, tag, len, offset * 8);
295                 }
296
297                 /*
298                  * check if frame assembling with the same tag is
299                  * already in progress
300                  */
301                 spin_lock_bh(&flist_lock);
302
303                 list_for_each_entry(frame, &lowpan_fragments, list)
304                         if (frame->tag == tag) {
305                                 found = true;
306                                 break;
307                         }
308
309                 /* alloc new frame structure */
310                 if (!found) {
311                         pr_debug("%s first fragment received for tag %d, "
312                                  "begin packet reassembly", __func__, tag);
313                         frame = lowpan_alloc_new_frame(skb, len, tag);
314                         if (!frame)
315                                 goto unlock_and_drop;
316                 }
317
318                 /* if payload fits buffer, copy it */
319                 if (likely((offset * 8 + skb->len) <= frame->length))
320                         skb_copy_to_linear_data_offset(frame->skb, offset * 8,
321                                                         skb->data, skb->len);
322                 else
323                         goto unlock_and_drop;
324
325                 frame->bytes_rcv += skb->len;
326
327                 /* frame assembling complete */
328                 if ((frame->bytes_rcv == frame->length) &&
329                      frame->timer.expires > jiffies) {
330                         /* if timer haven't expired - first of all delete it */
331                         del_timer_sync(&frame->timer);
332                         list_del(&frame->list);
333                         spin_unlock_bh(&flist_lock);
334
335                         pr_debug("%s successfully reassembled fragment "
336                                  "(tag %d)", __func__, tag);
337
338                         dev_kfree_skb(skb);
339                         skb = frame->skb;
340                         kfree(frame);
341
342                         if (lowpan_fetch_skb_u8(skb, &iphc0))
343                                 goto drop;
344
345                         break;
346                 }
347                 spin_unlock_bh(&flist_lock);
348
349                 return kfree_skb(skb), 0;
350         }
351         default:
352                 break;
353         }
354
355         if (lowpan_fetch_skb_u8(skb, &iphc1))
356                 goto drop;
357
358         _saddr = &mac_cb(skb)->sa;
359         _daddr = &mac_cb(skb)->da;
360
361         return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
362                                 _saddr->addr_type, IEEE802154_ADDR_LEN,
363                                 (u8 *)_daddr->hwaddr, _daddr->addr_type,
364                                 IEEE802154_ADDR_LEN, iphc0, iphc1,
365                                 lowpan_give_skb_to_devices);
366
367 unlock_and_drop:
368         spin_unlock_bh(&flist_lock);
369 drop:
370         kfree_skb(skb);
371         return -EINVAL;
372 }
373
374 static int lowpan_set_address(struct net_device *dev, void *p)
375 {
376         struct sockaddr *sa = p;
377
378         if (netif_running(dev))
379                 return -EBUSY;
380
381         /* TODO: validate addr */
382         memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
383
384         return 0;
385 }
386
387 static int
388 lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
389                         int mlen, int plen, int offset, int type)
390 {
391         struct sk_buff *frag;
392         int hlen;
393
394         hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
395                         LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
396
397         raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
398
399         frag = netdev_alloc_skb(skb->dev,
400                                 hlen + mlen + plen + IEEE802154_MFR_SIZE);
401         if (!frag)
402                 return -ENOMEM;
403
404         frag->priority = skb->priority;
405
406         /* copy header, MFR and payload */
407         skb_put(frag, mlen);
408         skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
409
410         skb_put(frag, hlen);
411         skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
412
413         skb_put(frag, plen);
414         skb_copy_to_linear_data_offset(frag, mlen + hlen,
415                                        skb_network_header(skb) + offset, plen);
416
417         raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
418
419         return dev_queue_xmit(frag);
420 }
421
/* Split an already header-compressed 6lowpan datagram that exceeds the
 * IEEE 802.15.4 MTU into one FRAG1 frame followed by FRAGN frames, each
 * transmitted via lowpan_fragment_xmit().
 *
 * Returns 0 on success or the first transmit/allocation error.
 */
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
        int err;
        u16 dgram_offset, dgram_size, payload_length, header_length,
            lowpan_size, frag_plen, offset;
        __be16 tag;
        u8 head[5];

        header_length = skb->mac_len;
        payload_length = skb->len - header_length;
        /* NOTE(review): fragment_tag is __be16 but is incremented and
         * byte-shifted below as a host-order integer; sparse will warn and
         * the on-air tag byte order depends on host endianness — confirm
         * the intended wire format.
         */
        tag = lowpan_dev_info(dev)->fragment_tag++;
        lowpan_size = skb_network_header_len(skb);
        /* total size of the datagram once fully decompressed */
        dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
                     header_length;

        /* first fragment header */
        head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7);
        head[1] = dgram_size & 0xff;
        head[2] = tag >> 8;
        head[3] = tag & 0xff;

        /* calc the nearest payload length(divided to 8) for first fragment
         * which fits into a IEEE802154_MTU
         */
        frag_plen = round_down(IEEE802154_MTU - header_length -
                               LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
                               IEEE802154_MFR_SIZE, 8);

        /* FRAG1 carries the compressed header plus the first payload slice */
        err = lowpan_fragment_xmit(skb, head, header_length,
                                   frag_plen + lowpan_size, 0,
                                   LOWPAN_DISPATCH_FRAG1);
        if (err) {
                pr_debug("%s unable to send FRAG1 packet (tag: %d)",
                         __func__, tag);
                goto exit;
        }

        offset = lowpan_size + frag_plen;
        dgram_offset += frag_plen;

        /* next fragment header */
        head[0] &= ~LOWPAN_DISPATCH_FRAG1;
        head[0] |= LOWPAN_DISPATCH_FRAGN;

        frag_plen = round_down(IEEE802154_MTU - header_length -
                               LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8);

        while (payload_length - offset > 0) {
                int len = frag_plen;

                /* datagram_offset field is expressed in 8-byte units */
                head[4] = dgram_offset >> 3;

                if (payload_length - offset < len)
                        len = payload_length - offset;

                err = lowpan_fragment_xmit(skb, head, header_length, len,
                                           offset, LOWPAN_DISPATCH_FRAGN);
                if (err) {
                        pr_debug("%s unable to send a subsequent FRAGN packet "
                                 "(tag: %d, offset: %d", __func__, tag, offset);
                        goto exit;
                }

                offset += len;
                dgram_offset += len;
        }

exit:
        return err;
}
493
/* ndo_start_xmit: hand the frame to the real WPAN device, fragmenting it
 * first when it does not fit into a single 802.15.4 frame. The skb is
 * always consumed (directly queued, or copied into fragments and freed).
 */
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int err = -1;

        pr_debug("package xmit\n");

        skb->dev = lowpan_dev_info(dev)->real_dev;
        if (skb->dev == NULL) {
                pr_debug("ERROR: no real wpan device found\n");
                goto error;
        }

        /* Send directly if less than the MTU minus the 2 checksum bytes. */
        if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
                err = dev_queue_xmit(skb);
                goto out;
        }

        pr_debug("frame is too big, fragmentation is needed\n");
        err = lowpan_skb_fragmentation(skb, dev);
error:
        /* the fragments (if any) are independent copies; the original skb
         * is done either way
         */
        dev_kfree_skb(skb);
out:
        if (err)
                pr_debug("ERROR: xmit failed\n");

        return (err < 0) ? NET_XMIT_DROP : err;
}
522
523 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
524 {
525         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
526         return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
527 }
528
529 static u16 lowpan_get_pan_id(const struct net_device *dev)
530 {
531         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
532         return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
533 }
534
535 static u16 lowpan_get_short_addr(const struct net_device *dev)
536 {
537         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
538         return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
539 }
540
541 static u8 lowpan_get_dsn(const struct net_device *dev)
542 {
543         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
544         return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
545 }
546
547 static struct header_ops lowpan_header_ops = {
548         .create = lowpan_header_create,
549 };
550
/* dedicated lockdep classes so stacking 6lowpan on a WPAN device does
 * not produce false lock-recursion warnings
 */
static struct lock_class_key lowpan_tx_busylock;
static struct lock_class_key lowpan_netdev_xmit_lock_key;
553
554 static void lowpan_set_lockdep_class_one(struct net_device *dev,
555                                          struct netdev_queue *txq,
556                                          void *_unused)
557 {
558         lockdep_set_class(&txq->_xmit_lock,
559                           &lowpan_netdev_xmit_lock_key);
560 }
561
562
/* ndo_init: assign the 6lowpan lockdep classes before registration. */
static int lowpan_dev_init(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
        dev->qdisc_tx_busylock = &lowpan_tx_busylock;
        return 0;
}
569
/* net_device_ops for the virtual 6lowpan interface */
static const struct net_device_ops lowpan_netdev_ops = {
        .ndo_init               = lowpan_dev_init,
        .ndo_start_xmit         = lowpan_xmit,
        .ndo_set_mac_address    = lowpan_set_address,
};
575
/* MLME ops installed via dev->ml_priv; each entry delegates to the
 * underlying real WPAN device
 */
static struct ieee802154_mlme_ops lowpan_mlme = {
        .get_pan_id = lowpan_get_pan_id,
        .get_phy = lowpan_get_phy,
        .get_short_addr = lowpan_get_short_addr,
        .get_dsn = lowpan_get_dsn,
};
582
/* rtnl_link_ops->setup: initialise a freshly allocated 6lowpan netdev. */
static void lowpan_setup(struct net_device *dev)
{
        dev->addr_len           = IEEE802154_ADDR_LEN;
        memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
        dev->type               = ARPHRD_IEEE802154;
        /* Frame Control + Sequence Number + Address fields + Security Header */
        dev->hard_header_len    = 2 + 1 + 20 + 14;
        dev->needed_tailroom    = 2; /* FCS */
        /* presumably IPv6 minimum MTU (1280) + 1 — TODO confirm */
        dev->mtu                = 1281;
        dev->tx_queue_len       = 0;
        dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
        dev->watchdog_timeo     = 0;

        dev->netdev_ops         = &lowpan_netdev_ops;
        dev->header_ops         = &lowpan_header_ops;
        dev->ml_priv            = &lowpan_mlme;
        /* the core frees the device on unregister */
        dev->destructor         = free_netdev;
}
601
602 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
603 {
604         if (tb[IFLA_ADDRESS]) {
605                 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
606                         return -EINVAL;
607         }
608         return 0;
609 }
610
/* Packet-type handler for ETH_P_IEEE802154 frames delivered by a WPAN
 * device: uncompressed IPv6 datagrams are copied (for header alignment)
 * and handed to the stacked 6lowpan devices; IPHC/FRAG1/FRAGN dispatches
 * go through process_data(). The input skb is always consumed.
 *
 * NOTE(review): skb->data[0] is read without checking that the payload
 * is non-empty — confirm the MAC layer guarantees at least one byte of
 * linear data here.
 */
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        struct packet_type *pt, struct net_device *orig_dev)
{
        struct sk_buff *local_skb;

        if (!netif_running(dev))
                goto drop;

        if (dev->type != ARPHRD_IEEE802154)
                goto drop;

        /* check that it's our buffer */
        if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
                /* Copy the packet so that the IPv6 header is
                 * properly aligned.
                 */
                local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
                                            skb_tailroom(skb), GFP_ATOMIC);
                if (!local_skb)
                        goto drop;

                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;

                /* Pull off the 1-byte of 6lowpan header. */
                skb_pull(local_skb, 1);

                /* per-device copies are made inside; free our copy after */
                lowpan_give_skb_to_devices(local_skb, NULL);

                kfree_skb(local_skb);
                kfree_skb(skb);
        } else {
                switch (skb->data[0] & 0xe0) {
                case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
                case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
                case LOWPAN_DISPATCH_FRAGN:     /* next fragments headers */
                        /* process_data() consumes the clone on every path */
                        local_skb = skb_clone(skb, GFP_ATOMIC);
                        if (!local_skb)
                                goto drop;
                        process_data(local_skb);

                        kfree_skb(skb);
                        break;
                default:
                        break;
                }
        }

        return NET_RX_SUCCESS;

drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}
665
666 static int lowpan_newlink(struct net *src_net, struct net_device *dev,
667                           struct nlattr *tb[], struct nlattr *data[])
668 {
669         struct net_device *real_dev;
670         struct lowpan_dev_record *entry;
671
672         pr_debug("adding new link\n");
673
674         if (!tb[IFLA_LINK])
675                 return -EINVAL;
676         /* find and hold real wpan device */
677         real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
678         if (!real_dev)
679                 return -ENODEV;
680         if (real_dev->type != ARPHRD_IEEE802154) {
681                 dev_put(real_dev);
682                 return -EINVAL;
683         }
684
685         lowpan_dev_info(dev)->real_dev = real_dev;
686         lowpan_dev_info(dev)->fragment_tag = 0;
687         mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
688
689         entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
690         if (!entry) {
691                 dev_put(real_dev);
692                 lowpan_dev_info(dev)->real_dev = NULL;
693                 return -ENOMEM;
694         }
695
696         entry->ldev = dev;
697
698         /* Set the lowpan harware address to the wpan hardware address. */
699         memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
700
701         mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
702         INIT_LIST_HEAD(&entry->list);
703         list_add_tail(&entry->list, &lowpan_devices);
704         mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
705
706         register_netdevice(dev);
707
708         return 0;
709 }
710
/* rtnl_link_ops->dellink: remove @dev from the global 6lowpan device
 * list, queue it for unregistration on @head and drop the reference
 * taken on the real WPAN device in lowpan_newlink(). Runs under RTNL.
 */
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
        struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
        struct net_device *real_dev = lowpan_dev->real_dev;
        struct lowpan_dev_record *entry, *tmp;

        ASSERT_RTNL();

        mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
        list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
                if (entry->ldev == dev) {
                        list_del(&entry->list);
                        kfree(entry);
                }
        }
        mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

        mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

        unregister_netdevice_queue(dev, head);

        dev_put(real_dev);
}
734
/* rtnl "lowpan" link type: a virtual 6lowpan interface created on top of
 * a real WPAN device (ip link add link <wpan> name <lp> type lowpan)
 */
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
        .kind           = "lowpan",
        .priv_size      = sizeof(struct lowpan_dev_info),
        .setup          = lowpan_setup,
        .newlink        = lowpan_newlink,
        .dellink        = lowpan_dellink,
        .validate       = lowpan_validate,
};
743
/* register the "lowpan" rtnl link type */
static inline int __init lowpan_netlink_init(void)
{
        return rtnl_link_register(&lowpan_link_ops);
}
748
/* unregister the "lowpan" rtnl link type (also tears down its links) */
static inline void lowpan_netlink_fini(void)
{
        rtnl_link_unregister(&lowpan_link_ops);
}
753
/* netdev notifier: when a real WPAN device is unregistered, tear down
 * every 6lowpan interface stacked on it. Runs under RTNL.
 */
static int lowpan_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        LIST_HEAD(del_list);
        struct lowpan_dev_record *entry, *tmp;

        if (dev->type != ARPHRD_IEEE802154)
                goto out;

        if (event == NETDEV_UNREGISTER) {
                list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
                        if (lowpan_dev_info(entry->ldev)->real_dev == dev)
                                lowpan_dellink(entry->ldev, &del_list);
                }

                /* unregister all collected 6lowpan devices in one batch */
                unregister_netdevice_many(&del_list);
        }

out:
        return NOTIFY_DONE;
}
776
/* notifier used to track unregistration of the underlying WPAN devices */
static struct notifier_block lowpan_dev_notifier = {
        .notifier_call = lowpan_device_event,
};
780
781 static struct packet_type lowpan_packet_type = {
782         .type = __constant_htons(ETH_P_IEEE802154),
783         .func = lowpan_rcv,
784 };
785
786 static int __init lowpan_init_module(void)
787 {
788         int err = 0;
789
790         err = lowpan_netlink_init();
791         if (err < 0)
792                 goto out;
793
794         dev_add_pack(&lowpan_packet_type);
795
796         err = register_netdevice_notifier(&lowpan_dev_notifier);
797         if (err < 0) {
798                 dev_remove_pack(&lowpan_packet_type);
799                 lowpan_netlink_fini();
800         }
801 out:
802         return err;
803 }
804
/* Module exit: unhook from the stack, then reap any reassembly contexts
 * still pending once no new fragments can arrive.
 */
static void __exit lowpan_cleanup_module(void)
{
        struct lowpan_fragment *frame, *tframe;

        lowpan_netlink_fini();

        dev_remove_pack(&lowpan_packet_type);

        unregister_netdevice_notifier(&lowpan_dev_notifier);

        /* Now 6lowpan packet_type is removed, so no new fragments are
         * expected on RX, therefore that's the time to clean incomplete
         * fragments.
         */
        spin_lock_bh(&flist_lock);
        list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
                /* NOTE(review): del_timer_sync() under flist_lock is only
                 * safe because the timer handler does not take this lock —
                 * confirm if the handler's locking ever changes.
                 */
                del_timer_sync(&frame->timer);
                list_del(&frame->list);
                dev_kfree_skb(frame->skb);
                kfree(frame);
        }
        spin_unlock_bh(&flist_lock);
}
828
829 module_init(lowpan_init_module);
830 module_exit(lowpan_cleanup_module);
831 MODULE_LICENSE("GPL");
832 MODULE_ALIAS_RTNL_LINK("lowpan");