/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
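/*
 * A service is "pooled" exactly when it has a thread start function,
 * i.e. when it was created with svc_create_pooled() below; plain
 * svc_create() leaves sv_function NULL.
 */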
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}
static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}
module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map.mode, 0644);
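/*
 * Usage sketch (illustrative, not from the original source): with the
 * 0644 permission above the mode is exposed as a writable module
 * parameter, so an administrator could select it with e.g.
 *
 *	modprobe sunrpc pool_mode=pernode
 * or
 *	echo auto > /sys/module/sunrpc/parameters/pool_mode
 *
 * param_set_pool_mode() only accepts a new value while no pooled
 * service is currently holding the map.
 */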
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
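/*
 * Rough examples of how the heuristic above behaves (illustrative only):
 * a two-node NUMA machine picks SVC_POOL_PERNODE, an 8-way single-node
 * SMP box picks SVC_POOL_PERCPU, and a uniprocessor or 2-way box falls
 * back to SVC_POOL_GLOBAL.
 */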
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
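/*
 * For instance (illustrative): if the online CPUs sit on nodes 0 and 2,
 * the loop above creates two pools with pool_to[] = { 0, 2 } and
 * to_pool[0] = 0, to_pool[2] = 1; any node that comes up later simply
 * falls through to pool 0.
 */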
/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
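/*
 * The map is reference counted: svc_create_pooled() takes a reference
 * via svc_pool_map_get() and svc_destroy() drops it again through
 * svc_pool_map_put() for pooled services, so the arrays live exactly
 * as long as at least one pooled service does.
 */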
/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		m->mode = SVC_POOL_DEFAULT;
		kfree(m->to_pool);
		kfree(m->pool_to);
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		node_to_cpumask_ptr(nodecpumask, node);
		set_cpus_allowed_ptr(task, nodecpumask);
		break;
	}
	}
}
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
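/*
 * Example (illustrative): with SVC_POOL_PERCPU on a 4-cpu box an RPC
 * arriving on cpu 2 is queued to serv->sv_pools[2]; for an unpooled
 * service pidx stays 0 and the modulo collapses everything onto the
 * single global pool.
 */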
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     sa_family_t family, void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_family    = family;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_unregister(serv);

	return serv;
}
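/*
 * Sizing example (illustrative, assuming 4K pages): a caller passing
 * bufsize = 32768 gets sv_max_payload = 32768 and sv_max_mesg =
 * roundup(32768 + PAGE_SIZE, PAGE_SIZE) = 36864, i.e. the payload plus
 * one page of slack for the RPC header.
 */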
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   sa_family_t family, void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, family, shutdown);
}
EXPORT_SYMBOL(svc_create);
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  sa_family_t family, void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, family, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL(svc_create_pooled);
/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	svc_close_all(&serv->sv_tempsocks);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	svc_close_all(&serv->sv_permsocks);

	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	svc_unregister(serv);
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL(svc_destroy);
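/*
 * Note that sv_nrthreads doubles as a reference count: svc_destroy()
 * only proceeds with the teardown above when the caller drops the last
 * reference, otherwise it merely rebalances the socket buffers and
 * returns.
 */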
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	unsigned int pages, arghi;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
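/*
 * Worked example (illustrative, 4K pages): for an sv_max_mesg of
 * 36864 bytes (the 32K-payload example above) this allocates
 * 36864/4096 + 1 = 10 pages, enough to hold the largest request and
 * its reply at the same time.
 */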
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(svc_prepare_thread);
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL or another lock to protect access to svc_serv fields.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		rqstp = svc_prepare_thread(serv, chosen_pool);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL(svc_set_num_threads);
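/*
 * Usage sketch (illustrative): svc_set_num_threads(serv, NULL, 8) makes
 * the total thread count 8 across all pools, starting new kthreads or
 * sending SIGINT to surplus ones as needed; passing a specific pool
 * restricts the adjustment to that pool's threads.
 */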
/*
 * Called from a server thread as it's exiting.  Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL(svc_exit_thread);
#ifdef CONFIG_SUNRPC_REGISTER_V4

/*
 * Registering kernel RPC services with rpcbind version 2 will work
 * over either IPv4 or IPv6, since the Linux kernel always registers
 * services for the "any" address.
 *
 * However, the local rpcbind daemon listens on either only AF_INET
 * or AF_INET6 (never both).  When it listens on AF_INET6, an rpcbind
 * version 2 registration will result in registering the service at
 * IN6ADDR_ANY, even if the RPC service being registered is not
 * IPv6-enabled.
 *
 * Rpcbind version 4 allows us to be a little more specific.  Kernel
 * RPC services that don't yet support AF_INET6 can register
 * themselves as IPv4-only with the local rpcbind daemon, even if the
 * daemon is listening only on AF_INET6.
 *
 * And, registering IPv6-enabled kernel RPC services via AF_INET6
 * verifies that the local user space rpcbind daemon is properly
 * configured to support remote AF_INET6 rpcbind requests.
 *
 * An AF_INET6 registration request will fail if the local rpcbind
 * daemon is not set up to listen on AF_INET6.  Likewise, we fail
 * AF_INET6 registration requests if svc_register() is configured to
 * support only rpcbind version 2.
 */
static int __svc_register(const u32 program, const u32 version,
			  const sa_family_t family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	struct sockaddr *sap;
	char *netid;

	switch (family) {
	case AF_INET:
		sap = (struct sockaddr *)&sin;
		netid = RPCBIND_NETID_TCP;
		if (protocol == IPPROTO_UDP)
			netid = RPCBIND_NETID_UDP;
		break;
	case AF_INET6:
		sap = (struct sockaddr *)&sin6;
		netid = RPCBIND_NETID_TCP6;
		if (protocol == IPPROTO_UDP)
			netid = RPCBIND_NETID_UDP6;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return rpcb_v4_register(program, version, sap, netid);
}
#else	/* CONFIG_SUNRPC_REGISTER_V4 */

static int __svc_register(const u32 program, const u32 version,
			  const sa_family_t family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	if (family != AF_INET)
		return -EAFNOSUPPORT;

	return rpcb_register(program, version, protocol, port);
}

#endif	/* CONFIG_SUNRPC_REGISTER_V4 */
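/*
 * For example (illustrative): an AF_INET registration over IPPROTO_UDP
 * is advertised with the "udp" netid, while AF_INET6 over IPPROTO_TCP
 * uses "tcp6"; the rpcbind v2 fallback above can only express the IPv4
 * cases.
 */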
/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in serv's address family
 */
int svc_register(const struct svc_serv *serv, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	BUG_ON(proto == 0 && port == 0);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%s, %u, %s, %u, %d)%s\n",
					progp->pg_name,
					serv->sv_family,
					proto == IPPROTO_UDP? "udp" : "tcp",
					port,
					i,
					progp->pg_vers[i]->vs_hidden?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = __svc_register(progp->pg_prog, i,
						serv->sv_family, proto, port);
			if (error < 0)
				break;
		}
	}

	return error;
}
/*
 * All transport protocols and ports for this service are removed
 * from the local rpcbind database if the service is not hidden.
 *
 * The result of unregistration is reported via dprintk for those
 * who want verification of the result, but is otherwise not
 * important.
 *
 * The local rpcbind daemon listens on either only IPv6 or only
 * IPv4.  The kernel can't tell how it's configured.  However,
 * AF_INET addresses are mapped to AF_INET6 in IPv6-only config-
 * urations, so even an unregistration request on AF_INET will
 * get to a local rpcbind daemon listening only on AF_INET6.  So
 * we always unregister via AF_INET.
 *
 * At this point we don't need rpcbind version 4 for unregis-
 * tration:  A v2 UNSET request will clear all transports (netids),
 * addresses, and address families for [program, version].
 */
static void svc_unregister(const struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;
	int error;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = rpcb_register(progp->pg_prog, i, 0, 0);
			dprintk("svc: svc_unregister(%sv%u), error %d\n",
					progp->pg_name, i, error);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
 * Printk the given error with the address of the client that caused it.
 */
static int
__attribute__ ((format (printf, 2, 3)))
svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	va_list args;
	int	r;
	char	buf[RPC_MAX_ADDRBUFLEN];

	if (!net_ratelimit())
		return 0;

	printk(KERN_WARNING "svc: %s: ",
		svc_print_addr(rqstp, buf, sizeof(buf)));

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct kvec *		argv = &rqstp->rq_arg.head[0];
	struct kvec *		resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			dir, prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;
	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;
	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;
	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	rqstp->rq_xid = svc_getu32(argv);
	svc_putu32(resv, rqstp->rq_xid);

	dir  = svc_getnl(argv);
	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (dir != 0)		/* direction != CALL */
		goto err_bad_dir;
	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */
	progp = serv->sv_program;

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	    !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_server   = serv;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (*statp == rpc_drop_reply) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success && (xdr = procp->pc_encode)
		 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}
	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return svc_send(rqstp);
 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	svc_drop(rqstp);
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %Zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */
err_bad_dir:
	svc_printk(rqstp, "bad direction %d, dropping request\n", dir);

	serv->sv_stats->rpcbadfmt++;
	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;
err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;
err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;
err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;
err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;
err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
EXPORT_SYMBOL(svc_process);
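/*
 * Behaviour sketch for the error paths above (illustrative): a call for
 * an unknown program, version or procedure still gets an accepted reply
 * carrying PROG_UNAVAIL, PROG_MISMATCH or PROC_UNAVAIL respectively,
 * whereas a short or mis-directed message is dropped without any reply
 * at all.
 */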
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);
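/*
 * Example (illustrative): the result is simply the smaller of the
 * transport class limit (xcl_max_payload) and the per-service limit
 * (sv_max_payload), so the 32K server from the earlier examples reports
 * 32768 on any transport that allows at least that much.
 */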