1 //==========================================================================
3 // sys/kern/uipc_socket.c
7 //==========================================================================
8 //####BSDCOPYRIGHTBEGIN####
10 // -------------------------------------------
12 // Portions of this software may have been derived from OpenBSD or other sources,
13 // and are covered by the appropriate copyright disclaimers included herein.
15 // -------------------------------------------
17 //####BSDCOPYRIGHTEND####
18 //==========================================================================
19 //#####DESCRIPTIONBEGIN####
22 // Contributors: gthomas
28 //####DESCRIPTIONEND####
30 //==========================================================================
33 /* $OpenBSD: uipc_socket.c,v 1.27 1999/10/14 08:18:49 cmetz Exp $ */
34 /* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */
37 * Copyright (c) 1982, 1986, 1988, 1990, 1993
38 * The Regents of the University of California. All rights reserved.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
71 #include <sys/param.h>
73 #include <cyg/io/file.h>
75 #include <sys/systm.h>
79 #include <sys/malloc.h>
81 #include <sys/domain.h>
82 #include <sys/kernel.h>
83 #include <sys/protosw.h>
84 #include <sys/socket.h>
85 #include <sys/socketvar.h>
87 #include <sys/signalvar.h>
88 #include <sys/resourcevar.h>
93 #endif /* SOMINCONN */
/* Bounds applied to the listen(2) backlog in solisten(): requests are
 * clamped into the range [sominconn, somaxconn]. */
95 int somaxconn = SOMAXCONN;
96 int sominconn = SOMINCONN;
99 * Socket operation routines.
100 * These routines are called by the routines in
101 * sys_socket.c or from a system process, and
102 * implement the semantics of socket operations by
103 * switching out to the protocol specific routines.
/*
 * socreate(): create a new socket in domain "dom" of the given type and
 * protocol.  Looks up the protocol switch entry, MALLOCs and zero-fills
 * a socket structure, records the creator's credentials, and attaches
 * the protocol with PRU_ATTACH.
 * NOTE(review): this excerpt elides many original lines (declarations,
 * braces, error paths) -- it is not compilable as shown.
 */
107 socreate(dom, aso, type, proto)
114 struct proc *p = curproc; /* XXX */
116 register struct protosw *prp;
117 register struct socket *so;
/* Prefer an exact (domain, proto, type) match; otherwise fall back to a
 * lookup by socket type alone. */
121 prp = pffindproto(dom, proto, type);
123 prp = pffindtype(dom, type);
124 if (prp == 0 || prp->pr_usrreq == 0)
125 return (EPROTONOSUPPORT);
126 if (prp->pr_type != type)
128 MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
129 bzero((caddr_t)so, sizeof(*so));
/* eCos path: no per-process credentials, so every socket is marked
 * privileged and owned by uid 0. */
132 so->so_state = SS_PRIV;
133 so->so_ruid = 0; // FIXME
134 so->so_euid = 0; // FIXME
/* Original BSD path: root-created sockets get SS_PRIV; the creator's
 * real/effective uids are cached on the socket for later checks. */
136 if (p->p_ucred->cr_uid == 0)
137 so->so_state = SS_PRIV;
138 so->so_ruid = p->p_cred->p_ruid;
139 so->so_euid = p->p_ucred->cr_uid;
/* Let the protocol allocate its control block; "proto" rides in the
 * mbuf-pointer argument slot (an old BSD calling convention). */
143 (*prp->pr_usrreq)(so, PRU_ATTACH, NULL, (struct mbuf *)(long)proto,
/* Attach failed: drop the fd reference so the socket can be freed. */
146 so->so_state |= SS_NOFDREF;
/* SunOS emulation quirk: datagram sockets default to SO_BROADCAST. */
152 extern struct emul emul_sunos;
153 if (p->p_emul == &emul_sunos && type == SOCK_DGRAM)
154 so->so_options |= SO_BROADCAST;
/* sobind() body fragment: at softnet priority, hand the local address
 * "nam" to the protocol via PRU_BIND.
 * NOTE(review): the function header and surrounding lines are elided
 * from this excerpt. */
166 int s = splsoftnet();
169 error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL);
/*
 * solisten(): put the socket into listening state via PRU_LISTEN and
 * record the connection-queue limit.  NOTE(review): some original lines
 * are elided from this excerpt.
 */
175 solisten(so, backlog)
176 register struct socket *so;
179 int s = splsoftnet(), error;
181 error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL);
187 so->so_options |= SO_ACCEPTCONN;
/* Out-of-range backlog requests are clamped into [sominconn, somaxconn]
 * rather than rejected. */
188 if (backlog < 0 || backlog > somaxconn)
190 if (backlog < sominconn)
192 so->so_qlimit = backlog;
/*
 * sofree(): release a socket that has no protocol control block and no
 * file-descriptor reference; releases the send buffer.
 * NOTE(review): some original lines are elided from this excerpt.
 */
199 register struct socket *so;
/* Still attached to a pcb or still referenced by an fd: not ours to free. */
202 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
206 * We must not decommission a socket that's on the accept(2)
207 * queue. If we do, then accept(2) may hang after select(2)
208 * indicated that the listening socket was ready.
210 if (!soqremque(so, 0))
213 sbrelease(&so->so_snd);
/*
 * soclose(): last-reference close.  Drains both accept queues on a
 * listening socket, initiates a disconnect if connected, optionally
 * lingers (SO_LINGER) until the disconnect completes, then detaches the
 * protocol and marks the socket SS_NOFDREF.
 * NOTE(review): many original lines are elided from this excerpt.
 */
219 * Close a socket on last file table reference removal.
220 * Initiate disconnect if connected.
221 * Free socket when disconnect complete.
225 register struct socket *so;
228 int s = splsoftnet(); /* conservative */
/* Abort every queued connection: so_q0 holds incomplete connections,
 * so_q holds connections ready for accept(2). */
231 if (so->so_options & SO_ACCEPTCONN) {
232 while ((so2 = so->so_q0) != NULL) {
233 (void) soqremque(so2, 0);
236 while ((so2 = so->so_q) != NULL) {
237 (void) soqremque(so2, 1);
243 if (so->so_state & SS_ISCONNECTED) {
244 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
245 error = sodisconnect(so);
249 if (so->so_options & SO_LINGER) {
/* Non-blocking socket with a disconnect in progress: don't sleep. */
250 if ((so->so_state & SS_ISDISCONNECTING) &&
251 (so->so_state & SS_NBIO))
/* Otherwise wait (interruptibly) until fully disconnected. */
253 while (so->so_state & SS_ISCONNECTED) {
254 error = tsleep((caddr_t)&so->so_timeo,
255 PSOCK | PCATCH, netcls,
/* Detach the protocol control block; a detach error is reported only
 * if nothing went wrong earlier. */
264 int error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH, NULL,
/* The fd reference must still be held at this point. */
270 if (so->so_state & SS_NOFDREF)
271 panic("soclose: NOFDREF");
272 so->so_state |= SS_NOFDREF;
/* soabort(): forcibly tear the connection down via PRU_ABORT. */
279 * Must be called at splsoftnet...
286 return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, NULL, NULL);
/*
 * soaccept(): accept a queued connection.  The socket arrives here
 * without a file reference (SS_NOFDREF); clear the flag and, unless the
 * peer already disconnected, let the protocol return the peer address
 * via PRU_ACCEPT.  NOTE(review): some lines are elided in this excerpt.
 */
291 register struct socket *so;
294 int s = splsoftnet();
297 if ((so->so_state & SS_NOFDREF) == 0)
298 panic("soaccept: !NOFDREF");
299 so->so_state &= ~SS_NOFDREF;
300 if ((so->so_state & SS_ISDISCONNECTED) == 0)
301 error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, NULL,
/*
 * soconnect(): initiate a connection to "nam".  Listening sockets may
 * not connect.  Connection-based protocols may connect only once; for
 * others an existing association is dissolved first, which lets a user
 * disconnect a datagram socket by connecting to a null address.
 * NOTE(review): some original lines are elided from this excerpt.
 */
309 register struct socket *so;
315 if (so->so_options & SO_ACCEPTCONN)
319 * If protocol is connection-based, can only connect once.
320 * Otherwise, if connected, try to disconnect first.
321 * This allows user to disconnect by connecting to, e.g.,
324 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
325 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
326 (error = sodisconnect(so))))
329 error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
/*
 * soconnect2(): join two sockets directly (socketpair(2) support).
 * so2 is passed to the protocol in the mbuf-pointer argument slot.
 */
337 register struct socket *so1;
340 int s = splsoftnet();
343 error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, NULL,
344 (struct mbuf *)so2, NULL);
/*
 * sodisconnect(): dissolve an existing association.  Fails if the
 * socket is not connected or a disconnect is already in progress;
 * otherwise hands the request to the protocol via PRU_DISCONNECT.
 */
351 register struct socket *so;
353 int s = splsoftnet();
356 if ((so->so_state & SS_ISCONNECTED) == 0) {
360 if (so->so_state & SS_ISDISCONNECTING) {
364 error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, NULL, NULL,
/* Map MSG_DONTWAIT onto the sblock() wait flag: non-blocking sends must
 * not sleep waiting for the send-buffer lock. */
371 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
374 * If send must go all at once and message is larger than
375 * send buffering, then hard error.
376 * Lock against other senders.
377 * If must go all at once and not enough room now, then
378 * inform user that this would block and do nothing.
379 * Otherwise, if nonblocking, send as much as possible.
380 * The data to be sent is described by "uio" if nonzero,
381 * otherwise by the mbuf chain "top" (which must be null
382 * if uio is not). Data provided in mbuf chain must be small
383 * enough to send all at once.
385 * Returns nonzero on error, timeout or signal; callers
386 * must check for short counts if EINTR/ERESTART are returned.
387 * Data and control buffers are freed on return.
/*
 * sosend(): send data and/or control information on a socket, copying
 * from "uio" into mbuf chains and handing them to the protocol with
 * PRU_SEND / PRU_SENDOOB.
 * NOTE(review): many original lines (labels, braces, error paths) are
 * elided from this excerpt.
 */
390 sosend(so, addr, uio, top, control, flags)
391 register struct socket *so;
395 struct mbuf *control;
399 struct proc *p = curproc; /* XXX */
402 register struct mbuf *m;
403 register long space, len;
404 register quad_t resid;
405 int clen = 0, error, s, dontroute, mlen;
/* "atomic": the whole message must be handed to the protocol at once
 * (datagram-style protocols, or a caller-supplied chain in "top"). */
406 int atomic = sosendallatonce(so) || top;
409 resid = uio->uio_resid;
411 resid = top->m_pkthdr.len;
413 * In theory resid should be unsigned (since uio->uio_resid is).
414 * However, space must be signed, as it might be less than 0
415 * if we over-committed, and we must use a signed comparison
416 * of space and resid. On the other hand, a negative resid
417 * causes us to loop sending 0-length segments to the protocol.
418 * MSG_EOR on a SOCK_STREAM socket is also invalid.
421 (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
/* Per-send routing bypass: only honored when the socket itself does not
 * already have SO_DONTROUTE set and the protocol is atomic. */
426 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
427 (so->so_proto->pr_flags & PR_ATOMIC);
429 p->p_stats->p_ru.ru_msgsnd++;
432 clen = control->m_len;
/* Error exit helper: record errno, restore spl, jump to the release
 * path that unlocks the send buffer and frees chains. */
433 #define snderr(errno) { error = errno; splx(s); goto release; }
436 if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
440 if (so->so_state & SS_CANTSENDMORE)
443 snderr(so->so_error);
/* Unconnected socket: connection-required protocols need a confirmed
 * (or confirming) connection unless this is a control-only send;
 * connectionless protocols need an explicit destination address. */
444 if ((so->so_state & SS_ISCONNECTED) == 0) {
445 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
446 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
447 !(resid == 0 && clen != 0))
449 } else if (addr == 0)
450 snderr(EDESTADDRREQ);
452 space = sbspace(&so->so_snd);
/* A message that can never fit in the send buffer is a hard error. */
455 if ((atomic && resid > so->so_snd.sb_hiwat) ||
456 clen > so->so_snd.sb_hiwat)
/* Not enough room now: either fail (non-blocking) or release the lock
 * and sleep until the protocol drains the buffer. */
458 if (space < resid + clen && uio &&
459 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
460 if (so->so_state & SS_NBIO)
462 sbunlock(&so->so_snd);
463 error = sbwait(&so->so_snd);
475 * Data is prepackaged in "top".
479 top->m_flags |= M_EOR;
/* First mbuf of a chain gets a packet header; the rest are plain. */
482 MGETHDR(m, M_WAIT, MT_DATA);
485 m->m_pkthdr.rcvif = (struct ifnet *)0;
487 MGET(m, M_WAIT, MT_DATA);
/* Large residual data: try to use a cluster mbuf. */
490 if (resid >= MINCLSIZE && space >= MCLBYTES) {
492 if ((m->m_flags & M_EXT) == 0)
496 len = min(MCLBYTES, resid);
/* Atomic first mbuf: reserve max_hdr bytes of leading space for
 * protocol headers to avoid a later prepend/copy. */
498 if (atomic && top == 0) {
499 len = min(MCLBYTES - max_hdr, resid);
500 m->m_data += max_hdr;
502 len = min(MCLBYTES, resid);
507 len = min(min(mlen, resid), space);
510 * For datagram protocols, leave room
511 * for protocol headers in first mbuf.
513 if (atomic && top == 0 && len < mlen)
516 error = uiomove(mtod(m, caddr_t), (int)len, uio);
517 resid = uio->uio_resid;
520 top->m_pkthdr.len += len;
526 top->m_flags |= M_EOR;
529 } while (space > 0 && atomic);
/* Temporarily set SO_DONTROUTE around the protocol call when the caller
 * asked for a routed-bypass send; cleared again below. */
531 so->so_options |= SO_DONTROUTE;
532 s = splsoftnet(); /* XXX */
533 error = (*so->so_proto->pr_usrreq)(so, (flags & MSG_OOB) ?
534 PRU_SENDOOB : PRU_SEND,
538 so->so_options &= ~SO_DONTROUTE;
545 } while (resid && space > 0);
549 sbunlock(&so->so_snd);
559 * Implement receive operations on a socket.
560 * We depend on the way that records are added to the sockbuf
561 * by sbappend*. In particular, each record (mbufs linked through m_next)
562 * must begin with an address if the protocol so specifies,
563 * followed by an optional mbuf or mbufs containing ancillary data,
564 * and then zero or more mbufs of data.
565 * In order to avoid blocking network interrupts for the entire time here,
566 * we splx() while doing the actual copy to user space.
567 * Although the sockbuf is locked, new data may still be appended,
568 * and thus we must maintain consistency of the sockbuf during that time.
570 * The caller may receive the data as a single mbuf chain by supplying
571 * an mbuf **mp0 for use in returning the chain. The uio is then used
572 * only for the count in uio_resid.
/*
 * soreceive(): receive data, out-of-band data, source address and
 * control information from a socket.
 * NOTE(review): many original lines (labels, braces, restart paths)
 * are elided from this excerpt -- not compilable as shown.
 */
575 soreceive(so, paddr, uio, mp0, controlp, flagsp)
576 register struct socket *so;
580 struct mbuf **controlp;
583 register struct mbuf *m, **mp;
584 register int flags, len, error, s, offset;
585 struct protosw *pr = so->so_proto;
586 struct mbuf *nextrecord;
/* Remembered so we can detect a zero-progress pass and retry below. */
588 size_t orig_resid = uio->uio_resid;
598 flags = *flagsp &~ MSG_EOR;
601 if (so->so_state & SS_NBIO)
602 flags |= MSG_DONTWAIT;
/* Out-of-band path: fetch OOB data from the protocol into a private
 * mbuf and copy it straight out; does not touch the receive buffer. */
603 if (flags & MSG_OOB) {
604 m = m_get(M_WAIT, MT_DATA);
605 error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
606 (struct mbuf *)(long)(flags & MSG_PEEK), NULL);
610 error = uiomove(mtod(m, caddr_t),
611 (int) min(uio->uio_resid, m->m_len), uio);
613 } while (uio->uio_resid && error == 0 && m);
620 *mp = (struct mbuf *)0;
621 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
622 (*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL);
625 if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
629 m = so->so_rcv.sb_mb;
631 * If we have less data than requested, block awaiting more
632 * (subject to any timeout) if:
633 * 1. the current count is less than the low water mark,
634 * 2. MSG_WAITALL is set, and it is possible to do the entire
635 * receive operation at once if we block (resid <= hiwat), or
636 * 3. MSG_DONTWAIT is not set.
637 * If MSG_WAITALL is set but resid is larger than the receive buffer,
638 * we have to do the receive in sections, and thus risk returning
639 * a short count if a timeout or signal occurs after we start.
641 if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
642 so->so_rcv.sb_cc < uio->uio_resid) &&
643 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
644 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
645 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
/* Sanity: an empty chain with a nonzero byte count means the sockbuf
 * accounting is corrupt. */
647 if (m == 0 && so->so_rcv.sb_cc)
/* Pending error or EOF: report it (clearing so_error unless peeking). */
653 error = so->so_error;
654 if ((flags & MSG_PEEK) == 0)
658 if (so->so_state & SS_CANTRCVMORE) {
/* If any queued mbuf carries OOB data or an end-of-record mark we can
 * return what we have instead of blocking. */
664 for (; m; m = m->m_next)
665 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
666 m = so->so_rcv.sb_mb;
669 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
670 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
674 if (uio->uio_resid == 0 && controlp == NULL)
676 if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
/* Nothing usable yet: drop the lock, sleep for data, then restart. */
680 sbunlock(&so->so_rcv);
681 error = sbwait(&so->so_rcv);
688 #ifdef notyet /* XXXX */
690 uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
692 nextrecord = m->m_nextpkt;
/* Record layout (see block comment above): optional MT_SONAME address
 * mbuf first, then MT_CONTROL mbufs, then data. */
693 if (pr->pr_flags & PR_ADDR) {
695 if (m->m_type != MT_SONAME)
/* Peek: copy the address out; consume: unlink and free the mbuf. */
699 if (flags & MSG_PEEK) {
701 *paddr = m_copy(m, 0, m->m_len);
704 sbfree(&so->so_rcv, m);
707 so->so_rcv.sb_mb = m->m_next;
709 m = so->so_rcv.sb_mb;
711 MFREE(m, so->so_rcv.sb_mb);
712 m = so->so_rcv.sb_mb;
/* Ancillary data: each MT_CONTROL mbuf is copied (peek) or consumed;
 * SCM_RIGHTS-style messages are externalized into the receiving
 * process by the domain hook before delivery. */
716 while (m && m->m_type == MT_CONTROL && error == 0) {
717 if (flags & MSG_PEEK) {
719 *controlp = m_copy(m, 0, m->m_len);
722 sbfree(&so->so_rcv, m);
724 if (pr->pr_domain->dom_externalize &&
725 mtod(m, struct cmsghdr *)->cmsg_type ==
727 error = (*pr->pr_domain->dom_externalize)(m);
729 so->so_rcv.sb_mb = m->m_next;
731 m = so->so_rcv.sb_mb;
733 MFREE(m, so->so_rcv.sb_mb);
734 m = so->so_rcv.sb_mb;
739 controlp = &(*controlp)->m_next;
743 if ((flags & MSG_PEEK) == 0)
744 m->m_nextpkt = nextrecord;
746 if (type == MT_OOBDATA)
748 if (m->m_flags & M_BCAST)
750 if (m->m_flags & M_MCAST)
/* Main copy loop: move data mbuf-by-mbuf until the request is filled,
 * the record ends, or an error occurs. */
755 while (m && uio->uio_resid > 0 && error == 0) {
/* Stop at any transition between normal and OOB data. */
756 if (m->m_type == MT_OOBDATA) {
757 if (type != MT_OOBDATA)
759 } else if (type == MT_OOBDATA)
762 else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
765 so->so_state &= ~SS_RCVATMARK;
766 len = uio->uio_resid;
/* Never read past the out-of-band mark in one chunk. */
767 if (so->so_oobmark && len > so->so_oobmark - offset)
768 len = so->so_oobmark - offset;
769 if (len > m->m_len - moff)
770 len = m->m_len - moff;
772 * If mp is set, just pass back the mbufs.
773 * Otherwise copy them out via the uio, then free.
774 * Sockbuf must be consistent here (points to current mbuf,
775 * it points to next record) when we drop priority;
776 * we must note any additions to the sockbuf when we
777 * block interrupts again.
779 if (mp == 0 && uio_error == 0) {
780 resid = uio->uio_resid;
783 uiomove(mtod(m, caddr_t) + moff, (int)len,
787 uio->uio_resid = resid - len;
789 uio->uio_resid -= len;
/* Consumed the whole mbuf: advance (peek) or unlink and free it. */
790 if (len == m->m_len - moff) {
791 if (m->m_flags & M_EOR)
793 if (flags & MSG_PEEK) {
797 nextrecord = m->m_nextpkt;
798 sbfree(&so->so_rcv, m);
802 so->so_rcv.sb_mb = m = m->m_next;
803 *mp = (struct mbuf *)0;
805 MFREE(m, so->so_rcv.sb_mb);
806 m = so->so_rcv.sb_mb;
809 m->m_nextpkt = nextrecord;
/* Partial mbuf: bump the offset (peek) or trim the front and adjust
 * the buffer byte count. */
812 if (flags & MSG_PEEK)
816 *mp = m_copym(m, 0, len, M_WAIT);
819 so->so_rcv.sb_cc -= len;
/* Track progress toward the OOB mark; set SS_RCVATMARK exactly when
 * the mark is reached so the next read stops there. */
822 if (so->so_oobmark) {
823 if ((flags & MSG_PEEK) == 0) {
824 so->so_oobmark -= len;
825 if (so->so_oobmark == 0) {
826 so->so_state |= SS_RCVATMARK;
831 if (offset == so->so_oobmark)
838 * If the MSG_WAITALL flag is set (for non-atomic socket),
839 * we must not quit until "uio->uio_resid == 0" or an error
840 * termination. If a signal/timeout occurs, return
841 * with a short count but without error.
842 * Keep sockbuf locked against other readers.
844 while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
845 !sosendallatonce(so) && !nextrecord) {
846 if (so->so_error || so->so_state & SS_CANTRCVMORE)
848 error = sbwait(&so->so_rcv);
850 sbunlock(&so->so_rcv);
854 if ((m = so->so_rcv.sb_mb) != NULL)
855 nextrecord = m->m_nextpkt;
/* Atomic protocols deliver whole records: anything left over in this
 * record is dropped (unless merely peeking). */
859 if (m && pr->pr_flags & PR_ATOMIC) {
861 if ((flags & MSG_PEEK) == 0)
862 (void) sbdroprecord(&so->so_rcv);
864 if ((flags & MSG_PEEK) == 0) {
866 so->so_rcv.sb_mb = nextrecord;
/* Tell protocols that want it (PR_WANTRCVD) that data was consumed,
 * e.g. so TCP can open its window. */
867 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
868 (*pr->pr_usrreq)(so, PRU_RCVD, NULL,
869 (struct mbuf *)(long)flags, NULL);
/* Zero progress without EOR or EOF: release the lock and try again
 * from the top rather than returning an empty result. */
871 if (orig_resid == uio->uio_resid && orig_resid &&
872 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
873 sbunlock(&so->so_rcv);
884 sbunlock(&so->so_rcv);
/*
 * soshutdown(): disable receives and/or sends according to "how",
 * expressed here as FREAD/FWRITE bits; receives are flushed via
 * sorflush() and sends are shut down via PRU_SHUTDOWN.
 * NOTE(review): some original lines are elided from this excerpt.
 */
891 register struct socket *so;
894 register struct protosw *pr = so->so_proto;
897 if (how & ~(FREAD|FWRITE))
902 return (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, NULL, NULL);
/*
 * sorflush(): discard everything in the receive buffer.  Takes an
 * uninterruptible lock (SB_NOINTR) on the buffer, snapshots it into
 * "asb", zeroes the live sockbuf, then lets the protocol's domain
 * dispose of any access rights (passed descriptors) held in the
 * discarded mbuf chain.
 * NOTE(review): some original lines are elided from this excerpt.
 */
908 register struct socket *so;
910 register struct sockbuf *sb = &so->so_rcv;
911 register struct protosw *pr = so->so_proto;
915 sb->sb_flags |= SB_NOINTR;
916 (void) sblock(sb, M_WAITOK);
921 bzero((caddr_t)sb, sizeof (*sb));
923 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
924 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
/*
 * sosetopt(): setsockopt(2) back end.  Socket-level (SOL_SOCKET)
 * options are handled here; anything else is delegated to the
 * protocol's ctloutput hook.  The option value arrives in mbuf m0,
 * which is consumed.
 * NOTE(review): case labels and braces are elided from this excerpt.
 */
929 sosetopt(so, level, optname, m0)
930 register struct socket *so;
935 register struct mbuf *m = m0;
/* Non-socket-level option: pass it straight down to the protocol. */
937 if (level != SOL_SOCKET) {
938 if (so->so_proto && so->so_proto->pr_ctloutput)
939 return ((*so->so_proto->pr_ctloutput)
940 (PRCO_SETOPT, so, level, optname, &m0));
/* SO_LINGER: exact-size struct linger required. */
946 if (m == NULL || m->m_len != sizeof (struct linger)) {
950 so->so_linger = mtod(m, struct linger *)->l_linger;
/* Boolean options map directly onto so_options bits: nonzero int sets
 * the bit, zero clears it. */
961 if (m == NULL || m->m_len < sizeof (int)) {
966 so->so_options |= optname;
968 so->so_options &= ~optname;
/* Buffer-size options: SO_{SND,RCV}BUF reserve buffer space;
 * SO_{SND,RCV}LOWAT set low-water marks clamped to the high-water. */
978 if (m == NULL || m->m_len < sizeof (int)) {
982 cnt = *mtod(m, int *);
989 if (sbreserve(optname == SO_SNDBUF ?
990 &so->so_snd : &so->so_rcv,
998 so->so_snd.sb_lowat = (cnt > so->so_snd.sb_hiwat) ?
999 so->so_snd.sb_hiwat : cnt;
1002 so->so_rcv.sb_lowat = (cnt > so->so_rcv.sb_hiwat) ?
1003 so->so_rcv.sb_hiwat : cnt;
/* Timeout options: convert the timeval to clock ticks; sb_timeo is a
 * short, so reject values that overflow SHRT_MAX ticks. */
1015 if (m == NULL || m->m_len < sizeof (*tv)) {
1019 tv = mtod(m, struct timeval *);
1020 if (tv->tv_sec * hz + tv->tv_usec / tick > SHRT_MAX) {
1024 val = tv->tv_sec * hz + tv->tv_usec / tick;
1029 so->so_snd.sb_timeo = val;
1032 so->so_rcv.sb_timeo = val;
1039 error = ENOPROTOOPT;
/* Give the protocol a chance to see socket-level options too; it takes
 * ownership of (and frees) m0. */
1042 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
1043 (void) ((*so->so_proto->pr_ctloutput)
1044 (PRCO_SETOPT, so, level, optname, &m0));
1045 m = NULL; /* freed by protocol */
/*
 * sogetopt(): getsockopt(2) back end.  Socket-level options are
 * answered from the socket structure into a freshly allocated mbuf
 * returned via *mp; other levels are delegated to the protocol's
 * ctloutput hook.
 * NOTE(review): case labels and braces are elided from this excerpt.
 */
1055 sogetopt(so, level, optname, mp)
1056 register struct socket *so;
1060 register struct mbuf *m;
1062 if (level != SOL_SOCKET) {
1063 if (so->so_proto && so->so_proto->pr_ctloutput) {
1064 return ((*so->so_proto->pr_ctloutput)
1065 (PRCO_GETOPT, so, level, optname, mp));
1067 return (ENOPROTOOPT);
/* Default reply size is one int; individual cases override m_len. */
1069 m = m_get(M_WAIT, MT_SOOPTS);
1070 m->m_len = sizeof (int);
/* SO_LINGER: return on/off state plus the linger interval. */
1075 m->m_len = sizeof (struct linger);
1076 mtod(m, struct linger *)->l_onoff =
1077 so->so_options & SO_LINGER;
1078 mtod(m, struct linger *)->l_linger = so->so_linger;
/* Boolean options: report the raw so_options bit. */
1081 case SO_USELOOPBACK:
1089 *mtod(m, int *) = so->so_options & optname;
1093 *mtod(m, int *) = so->so_type;
/* NOTE(review): in the original BSD code SO_ERROR also clears
 * so_error after reading it; the clearing line is not visible here. */
1097 *mtod(m, int *) = so->so_error;
1102 *mtod(m, int *) = so->so_snd.sb_hiwat;
1106 *mtod(m, int *) = so->so_rcv.sb_hiwat;
1110 *mtod(m, int *) = so->so_snd.sb_lowat;
1114 *mtod(m, int *) = so->so_rcv.sb_lowat;
/* Timeouts: convert sb_timeo ticks back into a struct timeval. */
1120 int val = (optname == SO_SNDTIMEO ?
1121 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
1123 m->m_len = sizeof(struct timeval);
1124 mtod(m, struct timeval *)->tv_sec = val / hz;
1125 mtod(m, struct timeval *)->tv_usec =
1132 return (ENOPROTOOPT);
/*
 * sohasoutofband(): out-of-band data has arrived -- deliver SIGURG to
 * the socket's owning process/group and wake any select(2)ers on the
 * receive buffer.
 */
1141 register struct socket *so;
1144 csignal(so->so_pgid, SIGURG, so->so_siguid, so->so_sigeuid);
1146 selwakeup(&so->so_rcv.sb_sel);