/*
 * Copyright 2011, Siemens AG
 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

/*
 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* Jon's code is based on 6lowpan implementation for Contiki which is:
 * Copyright (c) 2008, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/ipv6.h>

#include "6lowpan.h"

static LIST_HEAD(lowpan_devices);

/* private device info */
struct lowpan_dev_info {
        struct net_device *real_dev; /* real WPAN device ptr */
        struct mutex dev_list_mtx; /* mutex for list ops */
        unsigned short fragment_tag;
};

struct lowpan_dev_record {
        struct net_device *ldev;
        struct list_head list;
};

struct lowpan_fragment {
        struct sk_buff *skb;      /* skb to be assembled */
        u16 length;               /* length to be assembled */
        u32 bytes_rcv;            /* bytes received */
        u16 tag;                  /* current fragment tag */
        struct timer_list timer;  /* assembling timer */
        struct list_head list;    /* fragments list */
};

static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);

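/* 6LoWPAN fragmentation header, as defined by RFC 4944: a FRAG1 header
 * carries an 11-bit datagram size and a 16-bit datagram tag; FRAGN
 * additionally carries an 8-bit offset counted in 8-byte units.
 * Datagrams being reassembled live on lowpan_fragments, keyed by tag
 * and protected by flist_lock.
 */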
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
        return netdev_priv(dev);
}

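/* copy an 8-byte (EUI-64) hardware address from src to dest with the
 * byte order reversed
 */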
static inline void lowpan_address_flip(u8 *src, u8 *dest)
{
        int i;

        for (i = 0; i < IEEE802154_ADDR_LEN; i++)
                (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
}

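/* header_ops.create callback: compress the IPv6 header in place (IPHC),
 * then build the IEEE 802.15.4 MAC header on the underlying WPAN device
 * via dev_hard_header(). Only ETH_P_IPV6 packets are handled here.
 */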
static int lowpan_header_create(struct sk_buff *skb,
                        struct net_device *dev,
                        unsigned short type, const void *_daddr,
                        const void *_saddr, unsigned int len)
{
        const u8 *saddr = _saddr;
        const u8 *daddr = _daddr;
        struct ieee802154_addr sa, da;

        /* TODO:
         * if this packet isn't an ipv6 one, where should it be routed?
         */
        if (type != ETH_P_IPV6)
                return 0;

        if (!saddr)
                saddr = dev->dev_addr;

        raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
        raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

        lowpan_header_compress(skb, dev, type, daddr, saddr, len);

        /*
         * NOTE1: I'm still unsure about the fact that compression and WPAN
         * header are created here and not later in the xmit. So wait for
         * an opinion of net maintainers.
         */
        /*
         * NOTE2: to be absolutely correct, we must derive PANid information
         * from MAC subif of the 'dev' and 'real_dev' network devices, but
         * this isn't implemented in mainline yet, so currently we assign 0xff
         */
        mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
        mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);

        /* prepare wpan address data */
        sa.addr_type = IEEE802154_ADDR_LONG;
        sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

        memcpy(&(sa.hwaddr), saddr, 8);
        /* intra-PAN communications */
        da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

        /*
         * if the destination address is the broadcast address, use the
         * corresponding short address
         */
        if (lowpan_is_addr_broadcast(daddr)) {
                da.addr_type = IEEE802154_ADDR_SHORT;
                da.short_addr = IEEE802154_ADDR_BROADCAST;
        } else {
                da.addr_type = IEEE802154_ADDR_LONG;
                memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);

                /* request acknowledgment */
                mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
        }

        return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
                        type, (void *)&da, (void *)&sa, skb->len);
}

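/* deliver a copy of the received skb to every lowpan device stacked on
 * the WPAN device it arrived on
 */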
static int lowpan_give_skb_to_devices(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct lowpan_dev_record *entry;
        struct sk_buff *skb_cp;
        int stat = NET_RX_SUCCESS;

        rcu_read_lock();
        list_for_each_entry_rcu(entry, &lowpan_devices, list)
                if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
                        skb_cp = skb_copy(skb, GFP_ATOMIC);
                        if (!skb_cp) {
                                stat = -ENOMEM;
                                break;
                        }

                        skb_cp->dev = entry->ldev;
                        stat = netif_rx(skb_cp);
                }
        rcu_read_unlock();

        return stat;
}

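/* reassembly timed out: drop the partially assembled datagram and free
 * its bookkeeping
 */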
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
        struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

        pr_debug("timer expired for frame with tag %d\n", entry->tag);

        list_del(&entry->list);
        dev_kfree_skb(entry->skb);
        kfree(entry);
}

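/* allocate reassembly state for a new datagram: the buffer is sized for
 * the complete datagram, with headroom reserved so decompression can
 * later expand the IPv6 header in place
 */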
static struct lowpan_fragment *
lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
{
        struct lowpan_fragment *frame;

        frame = kzalloc(sizeof(struct lowpan_fragment),
                        GFP_ATOMIC);
        if (!frame)
                goto frame_err;

        INIT_LIST_HEAD(&frame->list);

        frame->length = len;
        frame->tag = tag;

        /* allocate buffer for frame assembling */
        frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
                                               sizeof(struct ipv6hdr));
        if (!frame->skb)
                goto skb_err;

        frame->skb->priority = skb->priority;

        /* reserve headroom for uncompressed ipv6 header */
        skb_reserve(frame->skb, sizeof(struct ipv6hdr));
        skb_put(frame->skb, frame->length);

        /* copy the first control block to keep a
         * trace of the link-layer addresses in case
         * of a link-local compressed address
         */
        memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));

        init_timer(&frame->timer);
        /* timeout is the same as for ipv6 - 60 sec */
        frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
        frame->timer.data = (unsigned long)frame;
        frame->timer.function = lowpan_fragment_timer_expired;

        add_timer(&frame->timer);

        list_add_tail(&frame->list, &lowpan_fragments);

        return frame;

skb_err:
        kfree(frame);
frame_err:
        return NULL;
}

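/* RX path: reassemble FRAG1/FRAGN chains when necessary, then hand the
 * complete datagram to the IPHC decompressor, which delivers the
 * resulting IPv6 packet through lowpan_give_skb_to_devices()
 */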
static int process_data(struct sk_buff *skb)
{
        u8 iphc0, iphc1;
        const struct ieee802154_addr *_saddr, *_daddr;

        raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
        /* at least two bytes will be used for the encoding */
        if (skb->len < 2)
                goto drop;

        if (lowpan_fetch_skb_u8(skb, &iphc0))
                goto drop;

        /* fragments assembling */
        switch (iphc0 & LOWPAN_DISPATCH_MASK) {
        case LOWPAN_DISPATCH_FRAG1:
        case LOWPAN_DISPATCH_FRAGN:
        {
                struct lowpan_fragment *frame;
                /* slen stores the rightmost 8 bits of the 11-bit length */
                u8 slen, offset = 0;
                u16 len, tag;
                bool found = false;

                if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
                    lowpan_fetch_skb_u16(skb, &tag))  /* fragment tag */
                        goto drop;

                /* adds the 3 MSB to the 8 LSB to retrieve the 11-bit length */
                len = ((iphc0 & 7) << 8) | slen;

                if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
                        pr_debug("%s received a FRAG1 packet (tag: %d, "
                                 "size of the entire IP packet: %d)",
                                 __func__, tag, len);
                } else { /* FRAGN */
                        if (lowpan_fetch_skb_u8(skb, &offset))
                                goto unlock_and_drop;
                        pr_debug("%s received a FRAGN packet (tag: %d, "
                                 "size of the entire IP packet: %d, "
                                 "offset: %d)", __func__, tag, len, offset * 8);
                }

                /*
                 * check if frame assembling with the same tag is
                 * already in progress
                 */
                spin_lock_bh(&flist_lock);

                list_for_each_entry(frame, &lowpan_fragments, list)
                        if (frame->tag == tag) {
                                found = true;
                                break;
                        }

                /* alloc new frame structure */
                if (!found) {
                        pr_debug("%s first fragment received for tag %d, "
                                 "begin packet reassembly", __func__, tag);
                        frame = lowpan_alloc_new_frame(skb, len, tag);
                        if (!frame)
                                goto unlock_and_drop;
                }

                /* if payload fits buffer, copy it */
                if (likely((offset * 8 + skb->len) <= frame->length))
                        skb_copy_to_linear_data_offset(frame->skb, offset * 8,
                                                       skb->data, skb->len);
                else
                        goto unlock_and_drop;

                frame->bytes_rcv += skb->len;

                /* frame assembling complete */
                if ((frame->bytes_rcv == frame->length) &&
                    frame->timer.expires > jiffies) {
                        /* if the timer hasn't expired - first of all delete it */
                        del_timer_sync(&frame->timer);
                        list_del(&frame->list);
                        spin_unlock_bh(&flist_lock);

                        pr_debug("%s successfully reassembled fragment "
                                 "(tag %d)", __func__, tag);

                        dev_kfree_skb(skb);
                        skb = frame->skb;
                        kfree(frame);

                        if (lowpan_fetch_skb_u8(skb, &iphc0))
                                goto drop;

                        break;
                }
                spin_unlock_bh(&flist_lock);

                return kfree_skb(skb), 0;
        }
        default:
                break;
        }

        if (lowpan_fetch_skb_u8(skb, &iphc1))
                goto drop;

        _saddr = &mac_cb(skb)->sa;
        _daddr = &mac_cb(skb)->da;

        return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
                                _saddr->addr_type, IEEE802154_ADDR_LEN,
                                (u8 *)_daddr->hwaddr, _daddr->addr_type,
                                IEEE802154_ADDR_LEN, iphc0, iphc1,
                                lowpan_give_skb_to_devices);

unlock_and_drop:
        spin_unlock_bh(&flist_lock);
drop:
        kfree_skb(skb);
        return -EINVAL;
}

static int lowpan_set_address(struct net_device *dev, void *p)
{
        struct sockaddr *sa = p;

        if (netif_running(dev))
                return -EBUSY;

        /* TODO: validate addr */
        memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

        return 0;
}

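/* build and queue one link-layer fragment: the MAC header copied from
 * the original skb, then the fragment header in 'head' (4 bytes for
 * FRAG1, 5 for FRAGN), then 'plen' bytes of payload taken at 'offset'
 */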
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
                     int mlen, int plen, int offset, int type)
{
        struct sk_buff *frag;
        int hlen;

        hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
                        LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;

        raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);

        frag = netdev_alloc_skb(skb->dev,
                                hlen + mlen + plen + IEEE802154_MFR_SIZE);
        if (!frag)
                return -ENOMEM;

        frag->priority = skb->priority;

        /* copy header, MFR and payload */
        skb_put(frag, mlen);
        skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);

        skb_put(frag, hlen);
        skb_copy_to_linear_data_offset(frag, mlen, head, hlen);

        skb_put(frag, plen);
        skb_copy_to_linear_data_offset(frag, mlen + hlen,
                                       skb_network_header(skb) + offset, plen);

        raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);

        return dev_queue_xmit(frag);
}

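/* split an oversized datagram: FRAG1 carries the first LOWPAN_FRAG_SIZE
 * bytes of payload, each following FRAGN carries up to LOWPAN_FRAG_SIZE
 * bytes at an offset expressed in 8-byte units; all fragments share one
 * 16-bit tag
 */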
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
        int err, header_length, payload_length, tag, offset = 0;
        u8 head[5];

        header_length = skb->mac_len;
        payload_length = skb->len - header_length;
        tag = lowpan_dev_info(dev)->fragment_tag++;

        /* first fragment header */
        head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
        head[1] = payload_length & 0xff;
        head[2] = tag >> 8;
        head[3] = tag & 0xff;

        err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
                                   0, LOWPAN_DISPATCH_FRAG1);
        if (err) {
                pr_debug("%s unable to send FRAG1 packet (tag: %d)",
                         __func__, tag);
                goto exit;
        }

        offset = LOWPAN_FRAG_SIZE;

        /* next fragment header */
        head[0] &= ~LOWPAN_DISPATCH_FRAG1;
        head[0] |= LOWPAN_DISPATCH_FRAGN;

        while (payload_length - offset > 0) {
                int len = LOWPAN_FRAG_SIZE;

                head[4] = offset / 8;

                if (payload_length - offset < len)
                        len = payload_length - offset;

                err = lowpan_fragment_xmit(skb, head, header_length,
                                           len, offset, LOWPAN_DISPATCH_FRAGN);
                if (err) {
                        pr_debug("%s unable to send a subsequent FRAGN packet "
                                 "(tag: %d, offset: %d)", __func__, tag, offset);
                        goto exit;
                }

                offset += len;
        }

exit:
        return err;
}

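/* ndo_start_xmit: frames that fit in the 802.15.4 MTU minus the 2-byte
 * FCS are queued directly on the real device, anything larger goes
 * through 6lowpan fragmentation
 */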
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int err = -1;

        pr_debug("packet xmit\n");

        skb->dev = lowpan_dev_info(dev)->real_dev;
        if (skb->dev == NULL) {
                pr_debug("ERROR: no real wpan device found\n");
                goto error;
        }

        /* Send directly if less than the MTU minus the 2 checksum bytes. */
        if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
                err = dev_queue_xmit(skb);
                goto out;
        }

        pr_debug("frame is too big, fragmentation is needed\n");
        err = lowpan_skb_fragmentation(skb, dev);
error:
        dev_kfree_skb(skb);
out:
        if (err)
                pr_debug("ERROR: xmit failed\n");

        return (err < 0) ? NET_XMIT_DROP : err;
}

static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
{
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
        return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
}

static u16 lowpan_get_pan_id(const struct net_device *dev)
{
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
        return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
}

static u16 lowpan_get_short_addr(const struct net_device *dev)
{
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
        return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
}

static u8 lowpan_get_dsn(const struct net_device *dev)
{
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
        return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
}

static struct header_ops lowpan_header_ops = {
        .create = lowpan_header_create,
};

static struct lock_class_key lowpan_tx_busylock;
static struct lock_class_key lowpan_netdev_xmit_lock_key;

static void lowpan_set_lockdep_class_one(struct net_device *dev,
                                         struct netdev_queue *txq,
                                         void *_unused)
{
        lockdep_set_class(&txq->_xmit_lock,
                          &lowpan_netdev_xmit_lock_key);
}

static int lowpan_dev_init(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
        dev->qdisc_tx_busylock = &lowpan_tx_busylock;
        return 0;
}

static const struct net_device_ops lowpan_netdev_ops = {
        .ndo_init = lowpan_dev_init,
        .ndo_start_xmit = lowpan_xmit,
        .ndo_set_mac_address = lowpan_set_address,
};

static struct ieee802154_mlme_ops lowpan_mlme = {
        .get_pan_id = lowpan_get_pan_id,
        .get_phy = lowpan_get_phy,
        .get_short_addr = lowpan_get_short_addr,
        .get_dsn = lowpan_get_dsn,
};

static void lowpan_setup(struct net_device *dev)
{
        dev->addr_len = IEEE802154_ADDR_LEN;
        memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
        dev->type = ARPHRD_IEEE802154;
        /* Frame Control + Sequence Number + Address fields + Security Header */
        dev->hard_header_len = 2 + 1 + 20 + 14;
        dev->needed_tailroom = 2; /* FCS */
        dev->mtu = 1281;
        dev->tx_queue_len = 0;
        dev->flags = IFF_BROADCAST | IFF_MULTICAST;
        dev->watchdog_timeo = 0;

        dev->netdev_ops = &lowpan_netdev_ops;
        dev->header_ops = &lowpan_header_ops;
        dev->ml_priv = &lowpan_mlme;
        dev->destructor = free_netdev;
}

static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
                        return -EINVAL;
        }
        return 0;
}

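/* packet_type handler for ETH_P_IEEE802154 frames: uncompressed IPv6
 * (LOWPAN_DISPATCH_IPV6) is passed up once the dispatch byte has been
 * stripped; IPHC and fragment dispatches go through process_data()
 */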
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
                      struct packet_type *pt, struct net_device *orig_dev)
{
        struct sk_buff *local_skb;

        if (!netif_running(dev))
                goto drop;

        if (dev->type != ARPHRD_IEEE802154)
                goto drop;

        /* check that it's our buffer */
        if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
                /* Copy the packet so that the IPv6 header is
                 * properly aligned.
                 */
                local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
                                            skb_tailroom(skb), GFP_ATOMIC);
                if (!local_skb)
                        goto drop;

                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;

                /* Pull off the 1-byte of 6lowpan header. */
                skb_pull(local_skb, 1);

                lowpan_give_skb_to_devices(local_skb, NULL);

                kfree_skb(local_skb);
        } else {
                switch (skb->data[0] & 0xe0) {
                case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
                case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
                case LOWPAN_DISPATCH_FRAGN:     /* next fragments headers */
                        local_skb = skb_clone(skb, GFP_ATOMIC);
                        if (!local_skb)
                                goto drop;
                        process_data(local_skb);
                        break;
                default:
                        break;
                }
        }

        return NET_RX_SUCCESS;

drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}

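/* rtnl newlink handler: bind this lowpan device to the WPAN device
 * named by IFLA_LINK and add it to the global device list
 */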
static int lowpan_newlink(struct net *src_net, struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[])
{
        struct net_device *real_dev;
        struct lowpan_dev_record *entry;

        pr_debug("adding new link\n");

        if (!tb[IFLA_LINK])
                return -EINVAL;
        /* find and hold real wpan device */
        real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
        if (!real_dev)
                return -ENODEV;
        if (real_dev->type != ARPHRD_IEEE802154) {
                dev_put(real_dev);
                return -EINVAL;
        }

        lowpan_dev_info(dev)->real_dev = real_dev;
        lowpan_dev_info(dev)->fragment_tag = 0;
        mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);

        entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
        if (!entry) {
                dev_put(real_dev);
                lowpan_dev_info(dev)->real_dev = NULL;
                return -ENOMEM;
        }

        entry->ldev = dev;

        /* Set the lowpan hardware address to the wpan hardware address. */
        memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);

        mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
        INIT_LIST_HEAD(&entry->list);
        list_add_tail(&entry->list, &lowpan_devices);
        mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

        register_netdevice(dev);

        return 0;
}

static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
        struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
        struct net_device *real_dev = lowpan_dev->real_dev;
        struct lowpan_dev_record *entry, *tmp;

        ASSERT_RTNL();

        mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
        list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
                if (entry->ldev == dev) {
                        list_del(&entry->list);
                        kfree(entry);
                }
        }
        mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

        mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

        unregister_netdevice_queue(dev, head);

        dev_put(real_dev);
}

static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
        .kind = "lowpan",
        .priv_size = sizeof(struct lowpan_dev_info),
        .setup = lowpan_setup,
        .newlink = lowpan_newlink,
        .dellink = lowpan_dellink,
        .validate = lowpan_validate,
};

static inline int __init lowpan_netlink_init(void)
{
        return rtnl_link_register(&lowpan_link_ops);
}

static inline void lowpan_netlink_fini(void)
{
        rtnl_link_unregister(&lowpan_link_ops);
}

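/* netdevice notifier: when the underlying WPAN device unregisters, tear
 * down every lowpan device stacked on it
 */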
static int lowpan_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        LIST_HEAD(del_list);
        struct lowpan_dev_record *entry, *tmp;

        if (dev->type != ARPHRD_IEEE802154)
                goto out;

        if (event == NETDEV_UNREGISTER) {
                list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
                        if (lowpan_dev_info(entry->ldev)->real_dev == dev)
                                lowpan_dellink(entry->ldev, &del_list);
                }

                unregister_netdevice_many(&del_list);
        }

out:
        return NOTIFY_DONE;
}

static struct notifier_block lowpan_dev_notifier = {
        .notifier_call = lowpan_device_event,
};

static struct packet_type lowpan_packet_type = {
        .type = __constant_htons(ETH_P_IEEE802154),
        .func = lowpan_rcv,
};

static int __init lowpan_init_module(void)
{
        int err = 0;

        err = lowpan_netlink_init();
        if (err < 0)
                goto out;

        dev_add_pack(&lowpan_packet_type);

        err = register_netdevice_notifier(&lowpan_dev_notifier);
        if (err < 0) {
                dev_remove_pack(&lowpan_packet_type);
                lowpan_netlink_fini();
        }
out:
        return err;
}

static void __exit lowpan_cleanup_module(void)
{
        struct lowpan_fragment *frame, *tframe;

        lowpan_netlink_fini();

        dev_remove_pack(&lowpan_packet_type);

        unregister_netdevice_notifier(&lowpan_dev_notifier);

        /* Now 6lowpan packet_type is removed, so no new fragments are
         * expected on RX, therefore that's the time to clean incomplete
         * fragments.
         */
        spin_lock_bh(&flist_lock);
        list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
                del_timer_sync(&frame->timer);
                list_del(&frame->list);
                dev_kfree_skb(frame->skb);
                kfree(frame);
        }
        spin_unlock_bh(&flist_lock);
}

module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");