From: Stephen Rothwell
Date: Fri, 28 Jun 2013 05:29:57 +0000 (+1000)
Subject: Merge remote-tracking branch 'staging/staging-next'
X-Git-Tag: next-20130628~25
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=27edb6bd2c314e0c19e1e8f63b3ed7ed05d1c35d;p=karo-tx-linux.git

Merge remote-tracking branch 'staging/staging-next'

Conflicts:
	Documentation/devicetree/bindings/thermal/ti_soc_thermal.txt
	drivers/staging/serqt_usb2/serqt_usb2.c
	drivers/staging/silicom/bpctl_mod.c
	drivers/thermal/ti-soc-thermal/ti-thermal-common.c
---

27edb6bd2c314e0c19e1e8f63b3ed7ed05d1c35d
diff --cc drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 000000000000,f0508084e8c5..67c464424ff4
mode 000000,100644..100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@@ -1,0 -1,85 +1,85 @@@
+ /*
+  * GPL HEADER START
+  *
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 only,
+  * as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but
+  * WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  * General Public License version 2 for more details (a copy is included
+  * in the LICENSE file that accompanied this code).
+  *
+  * You should have received a copy of the GNU General Public License
+  * version 2 along with this program; If not, see
+  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+  *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+  *
+  * GPL HEADER END
+  */
+ /*
+  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+  * Use is subject to license terms.
+  *
+  * Copyright (c) 2011, 2012, Intel Corporation.
+  */
+ /*
+  * This file is part of Lustre, http://www.lustre.org/
+  * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+ #ifndef LUSTRE_PATCHLESS_COMPAT_H
+ #define LUSTRE_PATCHLESS_COMPAT_H
+
+ #include
+
+ #include
+ #include
+ #include
+
+
+ #define ll_delete_from_page_cache(page) delete_from_page_cache(page)
+
+ static inline void
+ truncate_complete_page(struct address_space *mapping, struct page *page)
+ {
+         if (page->mapping != mapping)
+                 return;
+
+         if (PagePrivate(page))
 -                page->mapping->a_ops->invalidatepage(page, 0);
++                page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+
+         cancel_dirty_page(page, PAGE_SIZE);
+         ClearPageMappedToDisk(page);
+         ll_delete_from_page_cache(page);
+ }
+
+ # define d_refcount(d) ((d)->d_count)
+
+ #ifdef ATTR_OPEN
+ # define ATTR_FROM_OPEN ATTR_OPEN
+ #else
+ # ifndef ATTR_FROM_OPEN
+ # define ATTR_FROM_OPEN 0
+ # endif
+ #endif /* ATTR_OPEN */
+
+ #ifndef ATTR_RAW
+ #define ATTR_RAW 0
+ #endif
+
+ #ifndef ATTR_CTIME_SET
+ /*
+  * set ATTR_CTIME_SET to a high value to avoid any risk of collision with other
+  * ATTR_* attributes (see bug 13828)
+  */
+ #define ATTR_CTIME_SET (1 << 28)
+ #endif
+
+ #endif /* LUSTRE_PATCHLESS_COMPAT_H */
diff --cc drivers/staging/lustre/lustre/llite/rw26.c
index 000000000000,27e4e64bc1e7..f1a1c5f40a1d
mode 000000,100644..100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@@ -1,0 -1,586 +1,587 @@@
+ /*
+  * GPL HEADER START
+  *
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 only,
+  * as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but
+  * WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  * General Public License version 2 for more details (a copy is included
+  * in the LICENSE file that accompanied this code).
+  *
+  * You should have received a copy of the GNU General Public License
+  * version 2 along with this program; If not, see
+  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+  *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+  *
+  * GPL HEADER END
+  */
+ /*
+  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+  * Use is subject to license terms.
+  *
+  * Copyright (c) 2011, 2012, Intel Corporation.
+  */
+ /*
+  * This file is part of Lustre, http://www.lustre.org/
+  * Lustre is a trademark of Sun Microsystems, Inc.
+  *
+  * lustre/lustre/llite/rw26.c
+  *
+  * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
+  */
+
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+
+ #define DEBUG_SUBSYSTEM S_LLITE
+
+ #include
+ #include "llite_internal.h"
+ #include
+
+ /**
+  * Implements Linux VM address_space::invalidatepage() method. This method is
+  * called when the page is truncate from a file, either as a result of
+  * explicit truncate, or when inode is removed from memory (as a result of
+  * final iput(), umount, or memory pressure induced icache shrinking).
+  *
+  * [0, offset] bytes of the page remain valid (this is for a case of not-page
+  * aligned truncate).
Lustre leaves partially truncated page in the cache, + * relying on struct inode::i_size to limit further accesses. + */ -static void ll_invalidatepage(struct page *vmpage, unsigned long offset) ++static void ll_invalidatepage(struct page *vmpage, unsigned int offset, ++ unsigned int length) + { + struct inode *inode; + struct lu_env *env; + struct cl_page *page; + struct cl_object *obj; + + int refcheck; + + LASSERT(PageLocked(vmpage)); + LASSERT(!PageWriteback(vmpage)); + + /* + * It is safe to not check anything in invalidatepage/releasepage + * below because they are run with page locked and all our io is + * happening with locked page too + */ - if (offset == 0) { ++ if (offset == 0 && length == PAGE_CACHE_SIZE) { + env = cl_env_get(&refcheck); + if (!IS_ERR(env)) { + inode = vmpage->mapping->host; + obj = ll_i2info(inode)->lli_clob; + if (obj != NULL) { + page = cl_vmpage_page(vmpage, obj); + if (page != NULL) { + lu_ref_add(&page->cp_reference, + "delete", vmpage); + cl_page_delete(env, page); + lu_ref_del(&page->cp_reference, + "delete", vmpage); + cl_page_put(env, page); + } + } else + LASSERT(vmpage->private == 0); + cl_env_put(env, &refcheck); + } + } + } + + #ifdef HAVE_RELEASEPAGE_WITH_INT + #define RELEASEPAGE_ARG_TYPE int + #else + #define RELEASEPAGE_ARG_TYPE gfp_t + #endif + static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) + { + struct cl_env_nest nest; + struct lu_env *env; + struct cl_object *obj; + struct cl_page *page; + struct address_space *mapping; + int result; + + LASSERT(PageLocked(vmpage)); + if (PageWriteback(vmpage) || PageDirty(vmpage)) + return 0; + + mapping = vmpage->mapping; + if (mapping == NULL) + return 1; + + obj = ll_i2info(mapping->host)->lli_clob; + if (obj == NULL) + return 1; + + /* 1 for page allocator, 1 for cl_page and 1 for page cache */ + if (page_count(vmpage) > 3) + return 0; + + /* TODO: determine what gfp should be used by @gfp_mask. */ + env = cl_env_nested_get(&nest); + if (IS_ERR(env)) + /* If we can't allocate an env we won't call cl_page_put() + * later on which further means it's impossible to drop + * page refcount by cl_page, so ask kernel to not free + * this page. */ + return 0; + + page = cl_vmpage_page(vmpage, obj); + result = page == NULL; + if (page != NULL) { + if (!cl_page_in_use(page)) { + result = 1; + cl_page_delete(env, page); + } + cl_page_put(env, page); + } + cl_env_nested_put(&nest, env); + return result; + } + + static int ll_set_page_dirty(struct page *vmpage) + { + #if 0 + struct cl_page *page = vvp_vmpage_page_transient(vmpage); + struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host); + struct vvp_page *cpg; + + /* + * XXX should page method be called here? + */ + LASSERT(&obj->co_cl == page->cp_obj); + cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type)); + /* + * XXX cannot do much here, because page is possibly not locked: + * sys_munmap()->... + * ->unmap_page_range()->zap_pte_range()->set_page_dirty(). 
+ */ + vvp_write_pending(obj, cpg); + #endif + RETURN(__set_page_dirty_nobuffers(vmpage)); + } + + #define MAX_DIRECTIO_SIZE 2*1024*1024*1024UL + + static inline int ll_get_user_pages(int rw, unsigned long user_addr, + size_t size, struct page ***pages, + int *max_pages) + { + int result = -ENOMEM; + + /* set an arbitrary limit to prevent arithmetic overflow */ + if (size > MAX_DIRECTIO_SIZE) { + *pages = NULL; + return -EFBIG; + } + + *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + *max_pages -= user_addr >> PAGE_CACHE_SHIFT; + + OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages)); + if (*pages) { + down_read(¤t->mm->mmap_sem); + result = get_user_pages(current, current->mm, user_addr, + *max_pages, (rw == READ), 0, *pages, + NULL); + up_read(¤t->mm->mmap_sem); + if (unlikely(result <= 0)) + OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages)); + } + + return result; + } + + /* ll_free_user_pages - tear down page struct array + * @pages: array of page struct pointers underlying target buffer */ + static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) + { + int i; + + for (i = 0; i < npages; i++) { + if (pages[i] == NULL) + break; + if (do_dirty) + set_page_dirty_lock(pages[i]); + page_cache_release(pages[i]); + } + + OBD_FREE_LARGE(pages, npages * sizeof(*pages)); + } + + ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, + int rw, struct inode *inode, + struct ll_dio_pages *pv) + { + struct cl_page *clp; + struct cl_2queue *queue; + struct cl_object *obj = io->ci_obj; + int i; + ssize_t rc = 0; + loff_t file_offset = pv->ldp_start_offset; + long size = pv->ldp_size; + int page_count = pv->ldp_nr; + struct page **pages = pv->ldp_pages; + long page_size = cl_page_size(obj); + bool do_io; + int io_pages = 0; + ENTRY; + + queue = &io->ci_queue; + cl_2queue_init(queue); + for (i = 0; i < page_count; i++) { + if (pv->ldp_offsets) + file_offset = pv->ldp_offsets[i]; + + LASSERT(!(file_offset & (page_size - 1))); + clp = cl_page_find(env, obj, cl_index(obj, file_offset), + pv->ldp_pages[i], CPT_TRANSIENT); + if (IS_ERR(clp)) { + rc = PTR_ERR(clp); + break; + } + + rc = cl_page_own(env, io, clp); + if (rc) { + LASSERT(clp->cp_state == CPS_FREEING); + cl_page_put(env, clp); + break; + } + + do_io = true; + + /* check the page type: if the page is a host page, then do + * write directly */ + if (clp->cp_type == CPT_CACHEABLE) { + struct page *vmpage = cl_page_vmpage(env, clp); + struct page *src_page; + struct page *dst_page; + void *src; + void *dst; + + src_page = (rw == WRITE) ? pages[i] : vmpage; + dst_page = (rw == WRITE) ? vmpage : pages[i]; + + src = ll_kmap_atomic(src_page, KM_USER0); + dst = ll_kmap_atomic(dst_page, KM_USER1); + memcpy(dst, src, min(page_size, size)); + ll_kunmap_atomic(dst, KM_USER1); + ll_kunmap_atomic(src, KM_USER0); + + /* make sure page will be added to the transfer by + * cl_io_submit()->...->vvp_page_prep_write(). */ + if (rw == WRITE) + set_page_dirty(vmpage); + + if (rw == READ) { + /* do not issue the page for read, since it + * may reread a ra page which has NOT uptodate + * bit set. */ + cl_page_disown(env, io, clp); + do_io = false; + } + } + + if (likely(do_io)) { + cl_2queue_add(queue, clp); + + /* + * Set page clip to tell transfer formation engine + * that page has to be sent even if it is beyond KMS. 
+ */ + cl_page_clip(env, clp, 0, min(size, page_size)); + + ++io_pages; + } + + /* drop the reference count for cl_page_find */ + cl_page_put(env, clp); + size -= page_size; + file_offset += page_size; + } + + if (rc == 0 && io_pages) { + rc = cl_io_submit_sync(env, io, + rw == READ ? CRT_READ : CRT_WRITE, + queue, 0); + } + if (rc == 0) + rc = pv->ldp_size; + + cl_2queue_discard(env, io, queue); + cl_2queue_disown(env, io, queue); + cl_2queue_fini(env, queue); + RETURN(rc); + } + EXPORT_SYMBOL(ll_direct_rw_pages); + + static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, + int rw, struct inode *inode, + struct address_space *mapping, + size_t size, loff_t file_offset, + struct page **pages, int page_count) + { + struct ll_dio_pages pvec = { .ldp_pages = pages, + .ldp_nr = page_count, + .ldp_size = size, + .ldp_offsets = NULL, + .ldp_start_offset = file_offset + }; + + return ll_direct_rw_pages(env, io, rw, inode, &pvec); + } + + #ifdef KMALLOC_MAX_SIZE + #define MAX_MALLOC KMALLOC_MAX_SIZE + #else + #define MAX_MALLOC (128 * 1024) + #endif + + /* This is the maximum size of a single O_DIRECT request, based on the + * kmalloc limit. We need to fit all of the brw_page structs, each one + * representing PAGE_SIZE worth of user data, into a single buffer, and + * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is + * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ + #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ + ~(DT_MAX_BRW_SIZE - 1)) + static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, + const struct iovec *iov, loff_t file_offset, + unsigned long nr_segs) + { + struct lu_env *env; + struct cl_io *io; + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_mapping->host; + struct ccc_object *obj = cl_inode2ccc(inode); + long count = iov_length(iov, nr_segs); + long tot_bytes = 0, result = 0; + struct ll_inode_info *lli = ll_i2info(inode); + unsigned long seg = 0; + long size = MAX_DIO_SIZE; + int refcheck; + ENTRY; + + if (!lli->lli_has_smd) + RETURN(-EBADF); + + /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */ + if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK)) + RETURN(-EINVAL); + + CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), " + "offset=%lld=%llx, pages %lu (max %lu)\n", + inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE, + file_offset, file_offset, count >> PAGE_CACHE_SHIFT, + MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); + + /* Check that all user buffers are aligned as well */ + for (seg = 0; seg < nr_segs; seg++) { + if (((unsigned long)iov[seg].iov_base & ~CFS_PAGE_MASK) || + (iov[seg].iov_len & ~CFS_PAGE_MASK)) + RETURN(-EINVAL); + } + + env = cl_env_get(&refcheck); + LASSERT(!IS_ERR(env)); + io = ccc_env_io(env)->cui_cl.cis_io; + LASSERT(io != NULL); + + /* 0. Need locking between buffered and direct access. and race with + * size changing by concurrent truncates and writes. + * 1. Need inode mutex to operate transient pages. 
+ */ + if (rw == READ) + mutex_lock(&inode->i_mutex); + + LASSERT(obj->cob_transient_pages == 0); + for (seg = 0; seg < nr_segs; seg++) { + long iov_left = iov[seg].iov_len; + unsigned long user_addr = (unsigned long)iov[seg].iov_base; + + if (rw == READ) { + if (file_offset >= i_size_read(inode)) + break; + if (file_offset + iov_left > i_size_read(inode)) + iov_left = i_size_read(inode) - file_offset; + } + + while (iov_left > 0) { + struct page **pages; + int page_count, max_pages = 0; + long bytes; + + bytes = min(size, iov_left); + page_count = ll_get_user_pages(rw, user_addr, bytes, + &pages, &max_pages); + if (likely(page_count > 0)) { + if (unlikely(page_count < max_pages)) + bytes = page_count << PAGE_CACHE_SHIFT; + result = ll_direct_IO_26_seg(env, io, rw, inode, + file->f_mapping, + bytes, file_offset, + pages, page_count); + ll_free_user_pages(pages, max_pages, rw==READ); + } else if (page_count == 0) { + GOTO(out, result = -EFAULT); + } else { + result = page_count; + } + if (unlikely(result <= 0)) { + /* If we can't allocate a large enough buffer + * for the request, shrink it to a smaller + * PAGE_SIZE multiple and try again. + * We should always be able to kmalloc for a + * page worth of page pointers = 4MB on i386. */ + if (result == -ENOMEM && + size > (PAGE_CACHE_SIZE / sizeof(*pages)) * + PAGE_CACHE_SIZE) { + size = ((((size / 2) - 1) | + ~CFS_PAGE_MASK) + 1) & + CFS_PAGE_MASK; + CDEBUG(D_VFSTRACE,"DIO size now %lu\n", + size); + continue; + } + + GOTO(out, result); + } + + tot_bytes += result; + file_offset += result; + iov_left -= result; + user_addr += result; + } + } + out: + LASSERT(obj->cob_transient_pages == 0); + if (rw == READ) + mutex_unlock(&inode->i_mutex); + + if (tot_bytes > 0) { + if (rw == WRITE) { + struct lov_stripe_md *lsm; + + lsm = ccc_inode_lsm_get(inode); + LASSERT(lsm != NULL); + lov_stripe_lock(lsm); + obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0); + lov_stripe_unlock(lsm); + ccc_inode_lsm_put(inode, lsm); + } + } + + cl_env_put(env, &refcheck); + RETURN(tot_bytes ? 
: result); + } + + static int ll_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) + { + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + struct page *page; + int rc; + unsigned from = pos & (PAGE_CACHE_SIZE - 1); + ENTRY; + + page = grab_cache_page_write_begin(mapping, index, flags); + if (!page) + RETURN(-ENOMEM); + + *pagep = page; + + rc = ll_prepare_write(file, page, from, from + len); + if (rc) { + unlock_page(page); + page_cache_release(page); + } + RETURN(rc); + } + + static int ll_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) + { + unsigned from = pos & (PAGE_CACHE_SIZE - 1); + int rc; + + rc = ll_commit_write(file, page, from, from + copied); + unlock_page(page); + page_cache_release(page); + + return rc ?: copied; + } + + #ifdef CONFIG_MIGRATION + int ll_migratepage(struct address_space *mapping, + struct page *newpage, struct page *page + , enum migrate_mode mode + ) + { + /* Always fail page migration until we have a proper implementation */ + return -EIO; + } + #endif + + #ifndef MS_HAS_NEW_AOPS + struct address_space_operations ll_aops = { + .readpage = ll_readpage, + // .readpages = ll_readpages, + .direct_IO = ll_direct_IO_26, + .writepage = ll_writepage, + .writepages = ll_writepages, + .set_page_dirty = ll_set_page_dirty, + .write_begin = ll_write_begin, + .write_end = ll_write_end, + .invalidatepage = ll_invalidatepage, + .releasepage = (void *)ll_releasepage, + #ifdef CONFIG_MIGRATION + .migratepage = ll_migratepage, + #endif + .bmap = NULL + }; + #else + struct address_space_operations_ext ll_aops = { + .orig_aops.readpage = ll_readpage, + // .orig_aops.readpages = ll_readpages, + .orig_aops.direct_IO = ll_direct_IO_26, + .orig_aops.writepage = ll_writepage, + .orig_aops.writepages = ll_writepages, + .orig_aops.set_page_dirty = ll_set_page_dirty, + .orig_aops.prepare_write = ll_prepare_write, + .orig_aops.commit_write = ll_commit_write, + .orig_aops.invalidatepage = ll_invalidatepage, + .orig_aops.releasepage = ll_releasepage, + #ifdef CONFIG_MIGRATION + .orig_aops.migratepage = ll_migratepage, + #endif + .orig_aops.bmap = NULL, + .write_begin = ll_write_begin, + .write_end = ll_write_end + }; + #endif diff --cc drivers/staging/rtl8192u/r8192U_core.c index a18430e2abbd,c880adcaf0fd..14c14c24ac50 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@@ -1271,12 -1144,12 +1144,12 @@@ struct sk_buff *DrvAggr_Aggregation(str /* Subframe drv Tx descriptor and firmware info setting */ skb = pSendList->tx_agg_frames[i]; tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); - tx_agg_desc = (tx_desc_819x_usb_aggr_subframe *)agg_skb->tail; - tx_fwinfo = (tx_fwinfo_819x_usb *)(agg_skb->tail + sizeof(tx_desc_819x_usb_aggr_subframe)); + tx_agg_desc = (tx_desc_819x_usb_aggr_subframe *)skb_tail_pointer(agg_skb); + tx_fwinfo = (tx_fwinfo_819x_usb *)(skb_tail_pointer(agg_skb) + sizeof(tx_desc_819x_usb_aggr_subframe)); - memset(tx_fwinfo,0,sizeof(tx_fwinfo_819x_usb)); + memset(tx_fwinfo, 0, sizeof(tx_fwinfo_819x_usb)); /* DWORD 0 */ - tx_fwinfo->TxHT = (tcb_desc->data_rate&0x80)?1:0; + tx_fwinfo->TxHT = (tcb_desc->data_rate&0x80) ? 
1 : 0; tx_fwinfo->TxRate = MRateToHwRate8190Pci(tcb_desc->data_rate); tx_fwinfo->EnableCPUDur = tcb_desc->bTxEnableFwCalcDur; tx_fwinfo->Short = QueryIsShort(tx_fwinfo->TxHT, tx_fwinfo->TxRate, tcb_desc); diff --cc drivers/staging/serqt_usb2/serqt_usb2.c index 880f5c0011f2,39de5e021ccb..73fc3cc19e33 --- a/drivers/staging/serqt_usb2/serqt_usb2.c +++ b/drivers/staging/serqt_usb2/serqt_usb2.c @@@ -870,10 -870,10 +870,10 @@@ static int qt_open(struct tty_struct *t usb_clear_halt(serial->dev, port->read_urb->pipe); port0->open_ports++; - result = qt_get_device(serial, &port0->DeviceData); + result = qt_get_device(serial, &port0->device_data); /* Port specific setups */ - result = qt_open_channel(serial, port->port_number, &ChannelData); - result = qt_open_channel(serial, port->number, &channel_data); ++ result = qt_open_channel(serial, port->port_number, &channel_data); if (result < 0) { dev_dbg(&port->dev, "qt_open_channel failed\n"); return result; @@@ -1239,23 -1245,25 +1239,23 @@@ static void qt_set_termios(struct tty_s /* Now determine flow control */ if (cflag & CRTSCTS) { - dev_dbg(&port->dev, "%s - Enabling HW flow control port %d\n", - __func__, port->number); + dev_dbg(&port->dev, "%s - Enabling HW flow control\n", __func__); /* Enable RTS/CTS flow control */ - status = BoxSetHW_FlowCtrl(port->serial, index, 1); + status = box_set_hw_flow_ctrl(port->serial, index, 1); if (status < 0) { - dev_dbg(&port->dev, "BoxSetHW_FlowCtrl failed\n"); + dev_dbg(&port->dev, "box_set_hw_flow_ctrl failed\n"); return; } } else { /* Disable RTS/CTS flow control */ dev_dbg(&port->dev, - "%s - disabling HW flow control port %d\n", - __func__, port->number); + "%s - disabling HW flow control\n", __func__); - status = BoxSetHW_FlowCtrl(port->serial, index, 0); + status = box_set_hw_flow_ctrl(port->serial, index, 0); if (status < 0) { - dev_dbg(&port->dev, "BoxSetHW_FlowCtrl failed\n"); + dev_dbg(&port->dev, "box_set_hw_flow_ctrl failed\n"); return; } @@@ -1324,12 -1332,12 +1324,12 @@@ static inline int qt_real_tiocmget(stru int status; unsigned int index; - index = tty->index - serial->minor; + index = port->port_number; status = - BoxGetRegister(port->serial, index, MODEM_CONTROL_REGISTER, &mcr); + box_get_register(port->serial, index, MODEM_CONTROL_REGISTER, &mcr); if (status >= 0) { status = - BoxGetRegister(port->serial, index, + box_get_register(port->serial, index, MODEM_STATUS_REGISTER, &msr); } @@@ -1363,9 -1371,9 +1363,9 @@@ static inline int qt_real_tiocmset(stru int status; unsigned int index; - index = tty->index - serial->minor; + index = port->port_number; status = - BoxGetRegister(port->serial, index, MODEM_CONTROL_REGISTER, &mcr); + box_get_register(port->serial, index, MODEM_CONTROL_REGISTER, &mcr); if (status < 0) return -ESPIPE; diff --cc drivers/staging/silicom/bpctl_mod.c index c8ddb99e8526,4b3a1ae250c6..48b9fb110acd --- a/drivers/staging/silicom/bpctl_mod.c +++ b/drivers/staging/silicom/bpctl_mod.c @@@ -133,8 -128,8 +128,8 @@@ static unsigned long str_to_hex(char *p static int bp_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); - static bpctl_dev_t *pbpctl_dev = NULL, *pbpctl_dev_m = NULL; + static bpctl_dev_t *pbpctl_dev, *pbpctl_dev_m; int dev_num = 0, ret = 0, ret_d = 0, time_left = 0; /* printk("BP_PROC_SUPPORT event =%d %s %d\n", event,dev->name, dev->ifindex ); */ /* return NOTIFY_DONE; */
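
Note on the Lustre conflicts resolved above: both lustre_patchless_compat.h and rw26.c were adapted to the ->invalidatepage() address_space operation gaining a length argument in this kernel cycle, i.e. the prototype moving from (struct page *, unsigned long offset) to (struct page *, unsigned int offset, unsigned int length). The sketch below is not taken from this commit; it is a minimal, hypothetical illustration of that convention, assuming the post-change prototype — a caller such as truncate_complete_page() passes PAGE_CACHE_SIZE as the length for a whole-page invalidation, and the callback treats (offset == 0 && length == PAGE_CACHE_SIZE) as "drop the entire page", exactly the check ll_invalidatepage() now makes. The demo_* identifiers are placeholders, not kernel or Lustre symbols.

/* Hypothetical ->invalidatepage() using the three-argument prototype. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static void demo_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	/* Whole-page invalidation: the page is going away completely, so
	 * this is the only case where per-page private state may be torn
	 * down. */
	if (offset == 0 && length == PAGE_CACHE_SIZE)
		return;

	/* Partial invalidation: bytes [0, offset) stay valid; keep the page
	 * in the cache and rely on i_size to bound further access, as the
	 * ll_invalidatepage() comment above describes. */
}

static const struct address_space_operations demo_aops = {
	.invalidatepage	= demo_invalidatepage,
};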