92222dd17d4324506cc19babddac30b57ade7175
[gd/samba/.git] / source3 / rpc_server / rpc_host.c
1 /*
2  *  RPC host
3  *
4  *  Implements samba-dcerpcd service.
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 /*
21  * This binary has two usage modes:
22  *
23  * In the normal case when invoked from smbd or winbind it is given a
24  * directory to scan via --libexec-rpcds and will invoke on demand any
25  * binaries it finds there starting with rpcd_ when a named pipe
26  * connection is requested.
27  *
28  * In the second mode it can be started explicitly from system startup
29  * scripts.
30  *
31  * When Samba is set up as an Active Directory Domain Controller the
32  * normal samba binary overrides and provides DCERPC services, whilst
33  * allowing samba-dcerpcd to provide the services that smbd used to
34  * provide in that set-up, such as SRVSVC.
35  *
36  * The second mode can also be useful for use outside of the Samba framework,
37  * for example, use with the Linux kernel SMB2 server ksmbd. In this mode
38  * it behaves like inetd and listens on sockets on behalf of RPC server
39  * implementations.
40  */
41
42 #include "replace.h"
43 #include <fnmatch.h>
44 #include "lib/cmdline/cmdline.h"
45 #include "lib/cmdline/closefrom_except.h"
46 #include "source3/include/includes.h"
47 #include "source3/include/auth.h"
48 #include "rpc_sock_helper.h"
49 #include "messages.h"
50 #include "lib/util_file.h"
51 #include "lib/util/tevent_unix.h"
52 #include "lib/util/tevent_ntstatus.h"
53 #include "lib/util/smb_strtox.h"
54 #include "lib/util/debug.h"
55 #include "lib/util/server_id.h"
56 #include "lib/util/util_tdb.h"
57 #include "lib/tdb_wrap/tdb_wrap.h"
58 #include "lib/async_req/async_sock.h"
59 #include "librpc/rpc/dcerpc_util.h"
60 #include "lib/tsocket/tsocket.h"
61 #include "libcli/named_pipe_auth/npa_tstream.h"
62 #include "librpc/gen_ndr/ndr_rpc_host.h"
63 #include "source3/param/loadparm.h"
64 #include "source3/lib/global_contexts.h"
65 #include "lib/util/strv.h"
66 #include "lib/util/pidfile.h"
67 #include "source3/rpc_client/cli_pipe.h"
68 #include "librpc/gen_ndr/ndr_epmapper.h"
69 #include "librpc/gen_ndr/ndr_epmapper_c.h"
70 #include "nsswitch/winbind_client.h"
71 #include "libcli/security/dom_sid.h"
72 #include "libcli/security/security_token.h"
73
74 extern bool override_logfile;
75
76 struct rpc_server;
77 struct rpc_work_process;
78
79 /*
80  * samba-dcerpcd state to keep track of rpcd_* servers.
81  */
struct rpc_host {
	struct messaging_context *msg_ctx;
	/* talloc array of rpcd_* servers, indexed by server_index */
	struct rpc_server **servers;
	/* endpoint mapper database shared with the workers */
	struct tdb_wrap *epmdb;

	/*
	 * fd pair handed to worker processes as their stdin;
	 * creation/use not visible in this chunk — TODO confirm
	 */
	int worker_stdin[2];

	/* started with --np-helper (on demand from smbd/winbind)? */
	bool np_helper;

	/*
	 * If we're started with --np-helper but nobody contacts us,
	 * we need to exit after a while. This will be deleted once
	 * the first real client connects and our self-exit mechanism
	 * when we don't have any worker processes left kicks in.
	 */
	struct tevent_timer *np_helper_shutdown;
};
99
100 /*
101  * Map a RPC interface to a name. Used when filling the endpoint
102  * mapper database
103  */
struct rpc_host_iface_name {
	/* interface identifier: uuid plus version */
	struct ndr_syntax_id iface;
	/* talloc'ed annotation, e.g. "winreg" */
	char *name;
};
108
109 /*
110  * rpc_host representation for listening sockets. ncacn_ip_tcp might
111  * listen on multiple explicit IPs, all with the same port.
112  */
struct rpc_host_endpoint {
	/* backlink to the rpcd_* server owning this endpoint */
	struct rpc_server *server;
	struct dcerpc_binding *binding;
	/* talloc array of interfaces served on this endpoint */
	struct ndr_syntax_id *interfaces;
	/* listening sockets; multiple for ncacn_ip_tcp on several IPs */
	int *fds;
	size_t num_fds;
};
120
121 /*
122  * Staging area until we sent the socket plus bind to the helper
123  */
struct rpc_host_pending_client {
	/* doubly-linked list of pending clients per server */
	struct rpc_host_pending_client *prev, *next;

	/*
	 * Pointer for the destructor to remove us from the list of
	 * pending clients
	 */
	struct rpc_server *server;

	/*
	 * Waiter for client exit before a helper accepted the request
	 */
	struct tevent_req *hangup_wait;

	/*
	 * Info to pick the worker
	 */
	struct ncacn_packet *bind_pkt;

	/*
	 * This is what we send down to the worker
	 */
	int sock;
	struct rpc_host_client *client;
};
149
150 /*
151  * Representation of one worker process. For each rpcd_* executable
152  * there will be more of than one of these.
153  */
struct rpc_work_process {
	pid_t pid;

	/*
	 * !available means:
	 *
	 * Worker forked but did not send its initial status yet (not
	 * yet initialized)
	 *
	 * Worker died, but we did not receive SIGCHLD yet. We noticed
	 * it because we couldn't send it a message.
	 */
	bool available;

	/*
	 * Incremented by us when sending a client, decremented by
	 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
	 * client exits.
	 */
	uint32_t num_associations;
	uint32_t num_connections;

	/*
	 * Send SHUTDOWN to an idle child after a while
	 */
	struct tevent_timer *exit_timer;
};
181
182 /*
183  * State for a set of running instances of an rpcd_* server executable
184  */
struct rpc_server {
	/* backlink to the owning samba-dcerpcd state */
	struct rpc_host *host;
	/*
	 * Index into the rpc_host_state->servers array
	 */
	uint32_t server_index;

	/* path of the rpcd_* executable this state represents */
	const char *rpc_server_exe;

	/* endpoints and interface names reported via --list-interfaces */
	struct rpc_host_endpoint **endpoints;
	struct rpc_host_iface_name *iface_names;

	/* limits also reported by the rpcd via --list-interfaces */
	size_t max_workers;
	size_t idle_seconds;

	/*
	 * "workers" can be larger than "max_workers": Internal
	 * connections require an idle worker to avoid deadlocks
	 * between RPC servers: netlogon requires samr, everybody
	 * requires winreg. And if a deep call in netlogon asks for a
	 * samr connection, this must never end up in the same
	 * process. named_pipe_auth_req_info8->need_idle_server is set
	 * in those cases.
	 */
	struct rpc_work_process *workers;

	/* clients waiting to be handed to a worker */
	struct rpc_host_pending_client *pending_clients;
};
213
/* Async state for querying one rpcd via --list-interfaces */
struct rpc_server_get_endpoints_state {
	/* argv for spawning the rpcd helper */
	char **argl;
	/* default ncalrpc socket name (basename of the rpcd binary) */
	char *ncalrpc_endpoint;
	/* serve only this transport; NCA_UNKNOWN means "all" */
	enum dcerpc_transport_t only_transport;

	/* parse results handed out by _recv */
	struct rpc_host_iface_name *iface_names;
	struct rpc_host_endpoint **endpoints;

	unsigned long num_workers;
	unsigned long idle_seconds;
};
225
226 static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
227
228 /**
229  * @brief Query interfaces from an rpcd helper
230  *
231  * Spawn a rpcd helper, ask it for the interfaces it serves via
232  * --list-interfaces, parse the output
233  *
234  * @param[in] mem_ctx Memory context for the tevent_req
235  * @param[in] ev Event context to run this on
236  * @param[in] rpc_server_exe Binary to ask with --list-interfaces
237  * @param[in] only_transport Filter out anything but this
238  * @return The tevent_req representing this process
239  */
240
241 static struct tevent_req *rpc_server_get_endpoints_send(
242         TALLOC_CTX *mem_ctx,
243         struct tevent_context *ev,
244         const char *rpc_server_exe,
245         enum dcerpc_transport_t only_transport)
246 {
247         struct tevent_req *req = NULL, *subreq = NULL;
248         struct rpc_server_get_endpoints_state *state = NULL;
249         const char *progname = NULL;
250
251         req = tevent_req_create(
252                 mem_ctx, &state, struct rpc_server_get_endpoints_state);
253         if (req == NULL) {
254                 return NULL;
255         }
256         state->only_transport = only_transport;
257
258         progname = strrchr(rpc_server_exe, '/');
259         if (progname != NULL) {
260                 progname += 1;
261         } else {
262                 progname = rpc_server_exe;
263         }
264
265         state->ncalrpc_endpoint = talloc_strdup(state, progname);
266         if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
267                 return tevent_req_post(req, ev);
268         }
269
270         state->argl = talloc_array(state, char *, 4);
271         if (tevent_req_nomem(state->argl, req)) {
272                 return tevent_req_post(req, ev);
273         }
274
275         state->argl = str_list_make_empty(state);
276         str_list_add_printf(&state->argl, "%s", rpc_server_exe);
277         str_list_add_printf(&state->argl, "--list-interfaces");
278         str_list_add_printf(
279                 &state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
280
281         if (tevent_req_nomem(state->argl, req)) {
282                 return tevent_req_post(req, ev);
283         }
284
285         subreq = file_ploadv_send(state, ev, state->argl, 65536);
286         if (tevent_req_nomem(subreq, req)) {
287                 return tevent_req_post(req, ev);
288         }
289         tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
290         return req;
291 }
292
293 /*
294  * Parse a line of format
295  *
296  * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
297  *
298  * and add it to the "piface_names" array.
299  */
300
static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_iface_name **piface_names,
	const char *line)
{
	struct rpc_host_iface_name *iface_names = *piface_names;
	struct rpc_host_iface_name *tmp = NULL, *result = NULL;
	size_t i, num_ifaces = talloc_array_length(iface_names);
	struct ndr_syntax_id iface;
	char *name = NULL;
	bool ok;

	/* Parse the leading "uuid/version" token */
	ok = ndr_syntax_id_from_string(line, &iface);
	if (!ok) {
		DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
			    line);
		return NULL;
	}

	/* The annotation name follows the first space */
	name = strchr(line, ' ');
	if (name == NULL) {
		return NULL;
	}
	name += 1;

	/* Interface already known? Return the existing entry. */
	for (i=0; i<num_ifaces; i++) {
		result = &iface_names[i];

		if (ndr_syntax_id_equal(&result->iface, &iface)) {
			return result;
		}
	}

	/* Guard against size_t wraparound of num_ifaces+1 */
	if (num_ifaces + 1 < num_ifaces) {
		return NULL;
	}

	name = talloc_strdup(mem_ctx, name);
	if (name == NULL) {
		return NULL;
	}

	tmp = talloc_realloc(
		mem_ctx,
		iface_names,
		struct rpc_host_iface_name,
		num_ifaces + 1);
	if (tmp == NULL) {
		TALLOC_FREE(name);
		return NULL;
	}
	iface_names = tmp;

	result = &iface_names[num_ifaces];

	/* talloc_move reparents "name" onto the array */
	*result = (struct rpc_host_iface_name) {
		.iface = iface,
		.name = talloc_move(iface_names, &name),
	};

	/* Hand the possibly reallocated array back to the caller */
	*piface_names = iface_names;

	return result;
}
365
366 static struct rpc_host_iface_name *rpc_host_iface_names_find(
367         struct rpc_host_iface_name *iface_names,
368         const struct ndr_syntax_id *iface)
369 {
370         size_t i, num_iface_names = talloc_array_length(iface_names);
371
372         for (i=0; i<num_iface_names; i++) {
373                 struct rpc_host_iface_name *iface_name = &iface_names[i];
374
375                 if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
376                         return iface_name;
377                 }
378         }
379
380         return NULL;
381 }
382
/*
 * Two bindings describe the same endpoint if they share the
 * transport and carry an identical (or identically absent)
 * "endpoint" string option.
 */
static bool dcerpc_binding_same_endpoint(
	const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
{
	const char *e1 = NULL, *e2 = NULL;

	if (dcerpc_binding_get_transport(b1) !=
	    dcerpc_binding_get_transport(b2)) {
		return false;
	}

	e1 = dcerpc_binding_get_string_option(b1, "endpoint");
	e2 = dcerpc_binding_get_string_option(b2, "endpoint");

	if (e1 == NULL) {
		return (e2 == NULL);
	}
	if (e2 == NULL) {
		return false;
	}

	return (strcmp(e1, e2) == 0);
}
407
408 /**
409  * @brief Filter whether we want to serve an endpoint
410  *
411  * samba-dcerpcd might want to serve all endpoints a rpcd reported to
412  * us via --list-interfaces.
413  *
414  * In member mode, we only serve named pipes. Indicated by NCACN_NP
415  * passed in via "only_transport".
416  *
417  * @param[in] binding Which binding is in question?
418  * @param[in] only_transport Exclusive transport to serve
419  * @return Do we want to serve "binding" from samba-dcerpcd?
420  */
421
422 static bool rpc_host_serve_endpoint(
423         struct dcerpc_binding *binding,
424         enum dcerpc_transport_t only_transport)
425 {
426         enum dcerpc_transport_t transport =
427                 dcerpc_binding_get_transport(binding);
428
429         if (only_transport == NCA_UNKNOWN) {
430                 /* no filter around */
431                 return true;
432         }
433
434         if (transport != only_transport) {
435                 /* filter out */
436                 return false;
437         }
438
439         return true;
440 }
441
/*
 * Find the endpoint for "binding_string" in state->endpoints,
 * creating and appending a new one if it does not exist yet.
 * Returns NULL on parse/allocation failure or when the transport is
 * filtered out.
 */
static struct rpc_host_endpoint *rpc_host_endpoint_find(
	struct rpc_server_get_endpoints_state *state,
	const char *binding_string)
{
	size_t i, num_endpoints = talloc_array_length(state->endpoints);
	struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
	enum dcerpc_transport_t transport;
	NTSTATUS status;
	bool serve_this;

	ep = talloc_zero(state, struct rpc_host_endpoint);
	if (ep == NULL) {
		goto fail;
	}

	status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
			  binding_string,
			  nt_errstr(status));
		goto fail;
	}

	/* Apply the transport filter, e.g. NCACN_NP-only in member mode */
	serve_this = rpc_host_serve_endpoint(
		ep->binding, state->only_transport);
	if (!serve_this) {
		goto fail;
	}

	transport = dcerpc_binding_get_transport(ep->binding);

	if (transport == NCALRPC) {
		const char *ncalrpc_sock = dcerpc_binding_get_string_option(
			ep->binding, "endpoint");

		if (ncalrpc_sock == NULL) {
			/*
			 * generic ncalrpc:, set program-specific
			 * socket name. epmapper will redirect clients
			 * properly.
			 */
			status = dcerpc_binding_set_string_option(
				ep->binding,
				"endpoint",
				state->ncalrpc_endpoint);
			if (!NT_STATUS_IS_OK(status)) {
				DBG_DEBUG("dcerpc_binding_set_string_option "
					  "failed: %s\n",
					  nt_errstr(status));
				goto fail;
			}
		}
	}

	/* Already have an endpoint for this binding? Reuse it. */
	for (i=0; i<num_endpoints; i++) {

		bool ok = dcerpc_binding_same_endpoint(
			ep->binding, state->endpoints[i]->binding);

		if (ok) {
			TALLOC_FREE(ep);
			return state->endpoints[i];
		}
	}

	/* Guard against size_t wraparound of num_endpoints+1 */
	if (num_endpoints + 1 < num_endpoints) {
		goto fail;
	}

	tmp = talloc_realloc(
		state,
		state->endpoints,
		struct rpc_host_endpoint *,
		num_endpoints + 1);
	if (tmp == NULL) {
		goto fail;
	}
	state->endpoints = tmp;
	/* reparent "ep" onto the array; "ep" is NULL afterwards */
	state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);

	return state->endpoints[num_endpoints];
fail:
	TALLOC_FREE(ep);
	return NULL;
}
527
528 static bool ndr_interfaces_add_unique(
529         TALLOC_CTX *mem_ctx,
530         struct ndr_syntax_id **pifaces,
531         const struct ndr_syntax_id *iface)
532 {
533         struct ndr_syntax_id *ifaces = *pifaces;
534         size_t i, num_ifaces = talloc_array_length(ifaces);
535
536         for (i=0; i<num_ifaces; i++) {
537                 if (ndr_syntax_id_equal(iface, &ifaces[i])) {
538                         return true;
539                 }
540         }
541
542         if (num_ifaces + 1 < num_ifaces) {
543                 return false;
544         }
545         ifaces = talloc_realloc(
546                 mem_ctx,
547                 ifaces,
548                 struct ndr_syntax_id,
549                 num_ifaces + 1);
550         if (ifaces == NULL) {
551                 return false;
552         }
553         ifaces[num_ifaces] = *iface;
554
555         *pifaces = ifaces;
556         return true;
557 }
558
559 /*
560  * Read the text reply from the rpcd_* process telling us what
561  * endpoints it will serve when asked with --list-interfaces.
562  */
static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	struct rpc_host_iface_name *iface = NULL;
	uint8_t *buf = NULL;
	size_t buflen;
	char **lines = NULL;
	int ret, i, num_lines;

	ret = file_ploadv_recv(subreq, state, &buf);
	TALLOC_FREE(subreq);
	if (tevent_req_error(req, ret)) {
		return;
	}

	buflen = talloc_get_size(buf);
	if (buflen == 0) {
		/* rpcd produced no output: nothing to serve */
		tevent_req_done(req);
		return;
	}

	lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
	if (tevent_req_nomem(lines, req)) {
		return;
	}

	/* Line 0 is num_workers, line 1 is idle_seconds */
	if (num_lines < 2) {
		DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
		tevent_req_error(req, EINVAL);
		return;
	}

	state->num_workers = smb_strtoul(
		lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse num_workers(%s): %s\n",
			  lines[0],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}
	/*
	 * We need to limit the number of workers in order
	 * to put the worker index into a 16-bit space,
	 * in order to use a 16-bit association group space
	 * per worker.
	 */
	if (state->num_workers > 65536) {
		state->num_workers = 65536;
	}

	state->idle_seconds = smb_strtoul(
		lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
			  lines[1],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
		  state->num_workers,
		  state->idle_seconds,
		  state->argl[0]);

	/*
	 * Remaining lines: an unindented line introduces an
	 * interface ("uuid/version name"), indented lines are
	 * binding strings belonging to the last seen interface.
	 */
	for (i=2; i<num_lines; i++) {
		char *line = lines[i];
		struct rpc_host_endpoint *endpoint = NULL;
		bool ok;

		if (line[0] != ' ') {
			iface = rpc_exe_parse_iface_line(
				state, &state->iface_names, line);
			if (iface == NULL) {
				DBG_WARNING(
					"rpc_exe_parse_iface_line failed "
					"for: [%s] from %s\n",
					line,
					state->argl[0]);
				/*
				 * NOTE(review): rpc_exe_parse_iface_line
				 * also returns NULL on parse errors, not
				 * only on OOM — tevent_req_oom may
				 * misreport the cause; confirm intent
				 */
				tevent_req_oom(req);
				return;
			}
			continue;
		}

		/* Binding line before any interface line is a protocol error */
		if (iface == NULL) {
			DBG_DEBUG("Interface GUID line missing\n");
			tevent_req_error(req, EINVAL);
			return;
		}

		endpoint = rpc_host_endpoint_find(state, line+1);
		if (endpoint == NULL) {
			DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
				  line+1);
			continue;
		}

		ok = ndr_interfaces_add_unique(
			endpoint,
			&endpoint->interfaces,
			&iface->iface);
		if (!ok) {
			DBG_DEBUG("ndr_interfaces_add_unique failed\n");
			tevent_req_oom(req);
			return;
		}
	}

	tevent_req_done(req);
}
678
679 /**
680  * @brief Receive output from --list-interfaces
681  *
682  * @param[in] req The async req that just finished
683  * @param[in] mem_ctx Where to put the output on
684  * @param[out] endpoints The endpoints to be listened on
685  * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
686  * @return 0/errno
687  */
688 static int rpc_server_get_endpoints_recv(
689         struct tevent_req *req,
690         TALLOC_CTX *mem_ctx,
691         struct rpc_host_endpoint ***endpoints,
692         struct rpc_host_iface_name **iface_names,
693         size_t *num_workers,
694         size_t *idle_seconds)
695 {
696         struct rpc_server_get_endpoints_state *state = tevent_req_data(
697                 req, struct rpc_server_get_endpoints_state);
698         int err;
699
700         if (tevent_req_is_unix_error(req, &err)) {
701                 tevent_req_received(req);
702                 return err;
703         }
704
705         *endpoints = talloc_move(mem_ctx, &state->endpoints);
706         *iface_names = talloc_move(mem_ctx, &state->iface_names);
707         *num_workers = state->num_workers;
708         *idle_seconds = state->idle_seconds;
709         tevent_req_received(req);
710         return 0;
711 }
712
713 /*
714  * For NCACN_NP we get the named pipe auth info from smbd, if a client
715  * comes in via TCP or NCALPRC we need to invent it ourselves with
716  * anonymous session info.
717  */
718
/*
 * Fabricate a named_pipe_auth_req_info8 with an anonymous session
 * for clients arriving over NCACN_IP_TCP or NCALRPC (for NCACN_NP
 * smbd provides the real one).
 *
 * @param[in] mem_ctx Talloc parent for *pinfo8
 * @param[in] transport NCACN_IP_TCP or NCALRPC only (asserted)
 * @param[in] sock Connected client socket
 * @param[in] peer_addr Remote address of "sock"
 * @param[out] pinfo8 The generated info8, owned by mem_ctx
 * @return NT_STATUS_OK or a mapped errno/allocation failure
 */
static NTSTATUS rpc_host_generate_npa_info8_from_sock(
	TALLOC_CTX *mem_ctx,
	enum dcerpc_transport_t transport,
	int sock,
	const struct samba_sockaddr *peer_addr,
	struct named_pipe_auth_req_info8 **pinfo8)
{
	struct named_pipe_auth_req_info8 *info8 = NULL;
	struct samba_sockaddr local_addr = {
		.sa_socklen = sizeof(struct sockaddr_storage),
	};
	struct tsocket_address *taddr = NULL;
	char *remote_client_name = NULL;
	char *remote_client_addr = NULL;
	char *local_server_name = NULL;
	char *local_server_addr = NULL;
	/* IP string for TCP, unix path for ncalrpc */
	char *(*tsocket_address_to_name_fn)(
		const struct tsocket_address *addr,
		TALLOC_CTX *mem_ctx) = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	int ret;

	/*
	 * For NCACN_NP we get the npa info from smbd
	 */
	SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));

	tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
		tsocket_address_inet_addr_string : tsocket_address_unix_path;

	info8 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info8);
	if (info8 == NULL) {
		goto fail;
	}
	info8->session_info =
		talloc_zero(info8, struct auth_session_info_transport);
	if (info8->session_info == NULL) {
		goto fail;
	}

	/* No real authentication on raw tcp/ncalrpc: anonymous token */
	status = make_session_info_anonymous(
		info8->session_info,
		&info8->session_info->session_info);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("make_session_info_anonymous failed: %s\n",
			  nt_errstr(status));
		goto fail;
	}

	/* Remote side: stringify the peer address */
	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      peer_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	remote_client_addr = tsocket_address_to_name_fn(taddr, info8);
	if (remote_client_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	/* No reverse lookup: name is just the address string */
	remote_client_name = talloc_strdup(info8, remote_client_addr);
	if (remote_client_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(peer_addr,
						  &info8->remote_client_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	/* Local side: ask the socket for its own address */
	ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      &local_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	local_server_addr = tsocket_address_to_name_fn(taddr, info8);
	if (local_server_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	local_server_name = talloc_strdup(info8, local_server_addr);
	if (local_server_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(&local_addr,
						  &info8->local_server_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	if (transport == NCALRPC) {
		uid_t uid;
		gid_t gid;

		ret = getpeereid(sock, &uid, &gid);
		if (ret < 0) {
			status = map_nt_error_from_unix(errno);
			DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
			goto fail;
		}

		if (uid == sec_initial_uid()) {

			/*
			 * Indicate "root" to gensec
			 */

			TALLOC_FREE(remote_client_addr);
			TALLOC_FREE(remote_client_name);

			ret = tsocket_address_unix_from_path(
				info8,
				AS_SYSTEM_MAGIC_PATH_TOKEN,
				&taddr);
			if (ret == -1) {
				DBG_DEBUG("tsocket_address_unix_from_path "
					  "failed\n");
				goto nomem;
			}

			remote_client_addr =
				tsocket_address_unix_path(taddr, info8);
			if (remote_client_addr == NULL) {
				DBG_DEBUG("tsocket_address_unix_path "
					  "failed\n");
				goto nomem;
			}
			remote_client_name =
				talloc_strdup(info8, remote_client_addr);
			if (remote_client_name == NULL) {
				DBG_DEBUG("talloc_strdup failed\n");
				goto nomem;
			}
		}
	}

	info8->remote_client_addr = remote_client_addr;
	info8->remote_client_name = remote_client_name;
	info8->local_server_addr = local_server_addr;
	info8->local_server_name = local_server_name;

	*pinfo8 = info8;
	return NT_STATUS_OK;

nomem:
	status = NT_STATUS_NO_MEMORY;
fail:
	/* frees all the strings parented to info8 as well */
	TALLOC_FREE(info8);
	return status;
}
901
/*
 * State for rpc_host_bind_read_send/recv: read the first (bind)
 * packet from a freshly accepted client connection.
 */
struct rpc_host_bind_read_state {
	struct tevent_context *ev;

	int sock;			/* owned here; closed in cleanup unless handed out */
	struct tstream_context *plain;	/* tstream on a dup() of "sock" */
	struct tstream_context *npa_stream; /* named-pipe-auth stream for NCACN_NP */

	struct ncacn_packet *pkt;	/* the parsed initial (bind) packet */
	struct rpc_host_client *client;	/* client info shipped to the rpcd_* worker */
};
912
913 static void rpc_host_bind_read_cleanup(
914         struct tevent_req *req, enum tevent_req_state req_state);
915 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
916 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
917
/*
 * Wait for a bind packet from a client.
 *
 * Takes ownership of *psock (sets it to -1). For NCACN_NP the
 * named_pipe_auth handshake is performed first; for other transports
 * the npa_info8 is synthesized from the socket itself via
 * rpc_host_generate_npa_info8_from_sock().
 */
static struct tevent_req *rpc_host_bind_read_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	enum dcerpc_transport_t transport,
	int *psock,
	const struct samba_sockaddr *peer_addr)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_bind_read_state *state = NULL;
	int rc, sock_dup;
	NTSTATUS status;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_bind_read_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;

	/* Take ownership of the socket from the caller */
	state->sock = *psock;
	*psock = -1;

	tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);

	state->client = talloc_zero(state, struct rpc_host_client);
	if (tevent_req_nomem(state->client, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * Dup the socket to read the first RPC packet:
	 * tstream_bsd_existing_socket() takes ownership with
	 * autoclose, but we need to send "sock" down to our worker
	 * process later.
	 */
	sock_dup = dup(state->sock);
	if (sock_dup == -1) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
	if (rc == -1) {
		DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
			  strerror(errno));
		tevent_req_error(req, errno);
		/* state->plain did not take ownership, close the dup */
		close(sock_dup);
		return tevent_req_post(req, ev);
	}

	if (transport == NCACN_NP) {
		/*
		 * NOTE(review): 0xff | 0x0400 | 0x0100 is presumably
		 * the pipe device state (blocking/message mode bits)
		 * expected by the np helper protocol -- confirm
		 * against tstream_npa_accept_existing_send().
		 */
		subreq = tstream_npa_accept_existing_send(
			state,
			ev,
			state->plain,
			FILE_TYPE_MESSAGE_MODE_PIPE,
			0xff | 0x0400 | 0x0100,
			4096);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_bind_read_got_npa, req);
		return req;
	}

	/* Not a named pipe: build the client info from the socket */
	status = rpc_host_generate_npa_info8_from_sock(
		state->client,
		transport,
		state->sock,
		peer_addr,
		&state->client->npa_info8);
	if (!NT_STATUS_IS_OK(status)) {
		/* NOTE(review): reports any failure as out-of-memory */
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}

	subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
	return req;
}
1005
1006 static void rpc_host_bind_read_cleanup(
1007         struct tevent_req *req, enum tevent_req_state req_state)
1008 {
1009         struct rpc_host_bind_read_state *state = tevent_req_data(
1010                 req, struct rpc_host_bind_read_state);
1011
1012         if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
1013                 close(state->sock);
1014                 state->sock = -1;
1015         }
1016 }
1017
/*
 * The named_pipe_auth handshake finished: keep the npa info for the
 * worker and go on to read the client's bind packet off the npa
 * stream.
 */
static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct named_pipe_auth_req_info8 *info8 = NULL;
	int ret, err;

	ret = tstream_npa_accept_existing_recv(subreq,
					       &err,
					       state,
					       &state->npa_stream,
					       &info8,
					       NULL,  /* transport */
					       NULL,  /* remote_client_addr */
					       NULL,  /* remote_client_name */
					       NULL,  /* local_server_addr */
					       NULL,  /* local_server_name */
					       NULL); /* session_info */
	if (ret == -1) {
		tevent_req_error(req, err);
		return;
	}

	/* Hand the npa info over to the client struct */
	state->client->npa_info8 = talloc_move(state->client, &info8);

	subreq = dcerpc_read_ncacn_packet_send(
		state, state->ev, state->npa_stream);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
}
1052
/*
 * Got the initial packet from the client. It should be a bind; the
 * raw packet bytes are kept in client->bind_packet so they can be
 * forwarded verbatim to the rpcd_* worker.
 */
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct ncacn_packet *pkt = NULL;
	NTSTATUS status;

	status = dcerpc_read_ncacn_packet_recv(
		subreq,
		state->client,
		&pkt,
		&state->client->bind_packet);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
			  nt_errstr(status));
		/* Map to a generic unix error for the unix-error API */
		tevent_req_error(req, EINVAL); /* TODO */
		return;
	}
	state->pkt = talloc_move(state, &pkt);

	tevent_req_done(req);
}
1078
1079 static int rpc_host_bind_read_recv(
1080         struct tevent_req *req,
1081         TALLOC_CTX *mem_ctx,
1082         int *sock,
1083         struct rpc_host_client **client,
1084         struct ncacn_packet **bind_pkt)
1085 {
1086         struct rpc_host_bind_read_state *state = tevent_req_data(
1087                 req, struct rpc_host_bind_read_state);
1088         int err;
1089
1090         if (tevent_req_is_unix_error(req, &err)) {
1091                 tevent_req_received(req);
1092                 return err;
1093         }
1094
1095         *sock = state->sock;
1096         state->sock = -1;
1097
1098         *client = talloc_move(mem_ctx, &state->client);
1099         *bind_pkt = talloc_move(mem_ctx, &state->pkt);
1100         tevent_req_received(req);
1101         return 0;
1102 }
1103
1104 /*
1105  * Start the given rpcd_* binary.
1106  */
1107 static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
1108 {
1109         struct rpc_work_process *worker = &server->workers[idx];
1110         char **argv = NULL;
1111         int ret = ENOMEM;
1112
1113         argv = str_list_make_empty(server);
1114         str_list_add_printf(
1115                 &argv, "%s", server->rpc_server_exe);
1116         str_list_add_printf(
1117                 &argv, "--configfile=%s", get_dyn_CONFIGFILE());
1118         str_list_add_printf(
1119                 &argv, "--worker-group=%"PRIu32, server->server_index);
1120         str_list_add_printf(
1121                 &argv, "--worker-index=%zu", idx);
1122         str_list_add_printf(
1123                 &argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
1124         if (!is_default_dyn_LOGFILEBASE()) {
1125                 str_list_add_printf(
1126                         &argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
1127         }
1128         if (argv == NULL) {
1129                 ret = ENOMEM;
1130                 goto fail;
1131         }
1132
1133         worker->pid = fork();
1134         if (worker->pid == -1) {
1135                 ret = errno;
1136                 goto fail;
1137         }
1138         if (worker->pid == 0) {
1139                 /* Child. */
1140                 close(server->host->worker_stdin[1]);
1141                 ret = dup2(server->host->worker_stdin[0], 0);
1142                 if (ret != 0) {
1143                         exit(1);
1144                 }
1145                 execv(argv[0], argv);
1146                 _exit(1);
1147         }
1148
1149         DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
1150                   server->rpc_server_exe,
1151                   idx,
1152                   (int)worker->pid);
1153
1154         ret = 0;
1155 fail:
1156         TALLOC_FREE(argv);
1157         return ret;
1158 }
1159
/*
 * Find an rpcd_* worker for an external client, respect server->max_workers
 *
 * Preference order:
 *  1. An idle worker (no association groups) -- returned immediately.
 *  2. Otherwise the busy worker with the fewest association groups,
 *     ties broken by fewest connections.
 * If a worker slot is free, a new worker is forked instead and NULL
 * is returned; the caller retries once the worker has reported in.
 */
static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	struct rpc_work_process *perfect_worker = NULL;
	struct rpc_work_process *best_worker = NULL;
	size_t empty_slot = SIZE_MAX;
	size_t i;

	for (i=0; i<server->max_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			/* remember the first unused slot */
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			/* forked but not yet reported in, or dying */
			continue;
		}
		if (worker->num_associations == 0) {
			/*
			 * We have an idle worker...
			 */
			perfect_worker = worker;
			break;
		}
		if (best_worker == NULL) {
			/*
			 * It's busy, but the best so far...
			 */
			best_worker = worker;
			continue;
		}
		if (worker->num_associations < best_worker->num_associations) {
			/*
			 * It's also busy, but has less association groups
			 * (logical clients)
			 */
			best_worker = worker;
			continue;
		}
		if (worker->num_associations > best_worker->num_associations) {
			/*
			 * It's not better
			 */
			continue;
		}
		/*
		 * Ok, with the same number of association groups
		 * we pick the one with the lowest number of connections
		 */
		if (worker->num_connections < best_worker->num_connections) {
			best_worker = worker;
			continue;
		}
	}

	if (perfect_worker != NULL) {
		return perfect_worker;
	}

	if (empty_slot < SIZE_MAX) {
		/* Prefer forking a fresh worker over queueing on a busy one */
		int ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		/* NULL: caller waits for the new worker to report in */
		return NULL;
	}

	if (best_worker != NULL) {
		return best_worker;
	}

	return NULL;
}
1238
/*
 * Find an rpcd_* worker for an internal connection, possibly go beyond
 * server->max_workers
 *
 * Only the "overflow" slots [max_workers, num_workers) are scanned:
 * internal clients asking for an idle worker must never share a
 * worker with external load. Returns NULL while a new worker is
 * being forked; the caller retries later.
 */
static struct rpc_work_process *rpc_host_find_idle_worker(
	struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL, *tmp = NULL;
	size_t i, num_workers = talloc_array_length(server->workers);
	size_t empty_slot = SIZE_MAX;
	int ret;

	for (i=server->max_workers; i<num_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}
		if (worker->num_associations == 0) {
			/* found an idle overflow worker */
			return &server->workers[i];
		}
	}

	if (empty_slot < SIZE_MAX) {
		ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	/*
	 * All workers are busy. We need to expand the number of
	 * workers because we were asked for an idle worker.
	 */
	if (num_workers >= UINT16_MAX) {
		/*
		 * The worker index would not fit into 16-bits
		 * (it is encoded in the upper half of assoc_group_id)
		 */
		return NULL;
	}
	tmp = talloc_realloc(
		server,
		server->workers,
		struct rpc_work_process,
		num_workers+1);
	if (tmp == NULL) {
		return NULL;
	}
	server->workers = tmp;

	/* mark the new slot as not yet created */
	server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };

	ret = rpc_host_exec_worker(server, num_workers);
	if (ret != 0) {
		DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
	}

	return NULL;
}
1304
1305 /*
1306  * Find an rpcd_* process to talk to. Start a new one if necessary.
1307  */
1308 static void rpc_host_distribute_clients(struct rpc_server *server)
1309 {
1310         struct rpc_work_process *worker = NULL;
1311         struct rpc_host_pending_client *pending_client = NULL;
1312         uint32_t assoc_group_id;
1313         DATA_BLOB blob;
1314         struct iovec iov;
1315         enum ndr_err_code ndr_err;
1316         NTSTATUS status;
1317         const char *client_type = NULL;
1318
1319 again:
1320         pending_client = server->pending_clients;
1321         if (pending_client == NULL) {
1322                 DBG_DEBUG("No pending clients\n");
1323                 return;
1324         }
1325
1326         assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;
1327
1328         if (assoc_group_id != 0) {
1329                 size_t num_workers = talloc_array_length(server->workers);
1330                 uint16_t worker_index = assoc_group_id >> 16;
1331
1332                 client_type = "associated";
1333
1334                 if (worker_index >= num_workers) {
1335                         DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
1336                                   assoc_group_id);
1337                         goto done;
1338                 }
1339                 worker = &server->workers[worker_index];
1340
1341                 if ((worker->pid == -1) || !worker->available) {
1342                         DBG_DEBUG("Requested worker index %"PRIu16": "
1343                                   "pid=%d, available=%d\n",
1344                                   worker_index,
1345                                   (int)worker->pid,
1346                                   (int)worker->available);
1347                         /*
1348                          * Pick a random one for a proper bind nack
1349                          */
1350                         client_type = "associated+lost";
1351                         worker = rpc_host_find_worker(server);
1352                 }
1353         } else {
1354                 struct auth_session_info_transport *session_info =
1355                         pending_client->client->npa_info8->session_info;
1356                 uint32_t flags = 0;
1357                 bool found;
1358
1359                 client_type = "new";
1360
1361                 found = security_token_find_npa_flags(
1362                         session_info->session_info->security_token,
1363                         &flags);
1364
1365                 /* fresh assoc group requested */
1366                 if (found & (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
1367                         client_type = "new+exclusive";
1368                         worker = rpc_host_find_idle_worker(server);
1369                 } else {
1370                         client_type = "new";
1371                         worker = rpc_host_find_worker(server);
1372                 }
1373         }
1374
1375         if (worker == NULL) {
1376                 DBG_DEBUG("No worker found for %s client\n", client_type);
1377                 return;
1378         }
1379
1380         DLIST_REMOVE(server->pending_clients, pending_client);
1381
1382         ndr_err = ndr_push_struct_blob(
1383                 &blob,
1384                 pending_client,
1385                 pending_client->client,
1386                 (ndr_push_flags_fn_t)ndr_push_rpc_host_client);
1387         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1388                 DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
1389                             ndr_errstr(ndr_err));
1390                 goto done;
1391         }
1392
1393         DBG_INFO("Sending %s client %s to %d with "
1394                  "%"PRIu32" associations and %"PRIu32" connections\n",
1395                  client_type,
1396                  server->rpc_server_exe,
1397                  worker->pid,
1398                  worker->num_associations,
1399                  worker->num_connections);
1400
1401         iov = (struct iovec) {
1402                 .iov_base = blob.data, .iov_len = blob.length,
1403         };
1404
1405         status = messaging_send_iov(
1406                 server->host->msg_ctx,
1407                 pid_to_procid(worker->pid),
1408                 MSG_RPC_HOST_NEW_CLIENT,
1409                 &iov,
1410                 1,
1411                 &pending_client->sock,
1412                 1);
1413         if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
1414                 DBG_DEBUG("worker %d died, sigchld not yet received?\n",
1415                           worker->pid);
1416                 DLIST_ADD(server->pending_clients, pending_client);
1417                 worker->available = false;
1418                 goto again;
1419         }
1420         if (!NT_STATUS_IS_OK(status)) {
1421                 DBG_DEBUG("messaging_send_iov failed: %s\n",
1422                           nt_errstr(status));
1423                 goto done;
1424         }
1425         if (assoc_group_id == 0) {
1426                 worker->num_associations += 1;
1427         }
1428         worker->num_connections += 1;
1429         TALLOC_FREE(worker->exit_timer);
1430
1431         TALLOC_FREE(server->host->np_helper_shutdown);
1432
1433 done:
1434         TALLOC_FREE(pending_client);
1435 }
1436
1437 static int rpc_host_pending_client_destructor(
1438         struct rpc_host_pending_client *p)
1439 {
1440         TALLOC_FREE(p->hangup_wait);
1441         if (p->sock != -1) {
1442                 close(p->sock);
1443                 p->sock = -1;
1444         }
1445         DLIST_REMOVE(p->server->pending_clients, p);
1446         return 0;
1447 }
1448
1449 /*
1450  * Exception condition handler before rpcd_* worker
1451  * is handling the socket. Either the client exited or
1452  * sent unexpected data after the initial bind.
1453  */
1454 static void rpc_host_client_exited(struct tevent_req *subreq)
1455 {
1456         struct rpc_host_pending_client *pending = tevent_req_callback_data(
1457                 subreq, struct rpc_host_pending_client);
1458         bool ok;
1459         int err;
1460
1461         ok = wait_for_read_recv(subreq, &err);
1462
1463         TALLOC_FREE(subreq);
1464         pending->hangup_wait = NULL;
1465
1466         if (ok) {
1467                 DBG_DEBUG("client on sock %d sent data\n", pending->sock);
1468         } else {
1469                 DBG_DEBUG("client exited with %s\n", strerror(err));
1470         }
1471         TALLOC_FREE(pending);
1472 }
1473
/*
 * Map one interface syntax id to its list of endpoint binding
 * strings, used to fill epmdb.tdb.
 */
struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;
	char *bindings;		/* strv: iface name followed by binding strings */
};
1478
/*
 * Add one endpoint's interfaces to the per-interface binding maps
 * (*pmaps, a talloc array). A map entry is created on first sight of
 * an interface; this endpoint's binding string is appended to the
 * entry's strv unless already present. The mgmt interface is
 * skipped. Returns false on allocation/lookup failure; *pmaps is
 * updated in either case.
 */
static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	/* The DCE/RPC mgmt interface, implicitly offered everywhere */
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		1.0};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into epmdb.tdb.
			 */
			continue;
		}

		/* look for an existing map entry for this interface */
		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			/* first time we see this interface */
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				/*
				 * Steal the name out of iface_names:
				 * it becomes the first strv element,
				 * binding strings are appended below.
				 */
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		/* append the binding string if not already listed */
		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	*pmaps = maps;
	return ok;
}
1568
1569 static bool rpc_iface_binding_map_add_endpoints(
1570         TALLOC_CTX *mem_ctx,
1571         struct rpc_host_endpoint **endpoints,
1572         struct rpc_host_iface_name *iface_names,
1573         struct rpc_iface_binding_map **pbinding_maps)
1574 {
1575         size_t i, num_endpoints = talloc_array_length(endpoints);
1576
1577         for (i=0; i<num_endpoints; i++) {
1578                 bool ok = rpc_iface_binding_map_add_endpoint(
1579                         mem_ctx, endpoints[i], iface_names, pbinding_maps);
1580                 if (!ok) {
1581                         return false;
1582                 }
1583         }
1584         return true;
1585 }
1586
1587 static bool rpc_host_fill_epm_db(
1588         struct tdb_wrap *db,
1589         struct rpc_host_endpoint **endpoints,
1590         struct rpc_host_iface_name *iface_names)
1591 {
1592         struct rpc_iface_binding_map *maps = NULL;
1593         size_t i, num_maps;
1594         bool ret = false;
1595         bool ok;
1596
1597         ok = rpc_iface_binding_map_add_endpoints(
1598                 talloc_tos(), endpoints, iface_names, &maps);
1599         if (!ok) {
1600                 goto fail;
1601         }
1602
1603         num_maps = talloc_array_length(maps);
1604
1605         for (i=0; i<num_maps; i++) {
1606                 struct rpc_iface_binding_map *map = &maps[i];
1607                 struct ndr_syntax_id_buf buf;
1608                 char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
1609                 TDB_DATA value = {
1610                         .dptr = (uint8_t *)map->bindings,
1611                         .dsize = talloc_array_length(map->bindings),
1612                 };
1613                 int rc;
1614
1615                 rc = tdb_store(
1616                         db->tdb, string_term_tdb_data(keystr), value, 0);
1617                 if (rc == -1) {
1618                         DBG_DEBUG("tdb_store() failed: %s\n",
1619                                   tdb_errorstr(db->tdb));
1620                         goto fail;
1621                 }
1622         }
1623
1624         ret = true;
1625 fail:
1626         TALLOC_FREE(maps);
1627         return ret;
1628 }
1629
/* State for rpc_server_setup_send/recv */
struct rpc_server_setup_state {
	struct rpc_server *server;	/* the server being initialized */
};
1633
static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);

/*
 * Async initialize state for all possible rpcd_* servers.
 * Note this does not start them.
 *
 * Queries the rpcd_* binary (rpc_server_exe) for the endpoints it
 * serves; the worker array is sized in the completion callback.
 */
static struct tevent_req *rpc_server_setup_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host *host,
	const char *rpc_server_exe)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_setup_state *state = NULL;
	struct rpc_server *server = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_setup_state);
	if (req == NULL) {
		return NULL;
	}
	state->server = talloc_zero(state, struct rpc_server);
	if (tevent_req_nomem(state->server, req)) {
		return tevent_req_post(req, ev);
	}

	server = state->server;

	*server = (struct rpc_server) {
		.host = host,
		.server_index = UINT32_MAX,	/* assigned later */
		.rpc_server_exe = talloc_strdup(server, rpc_server_exe),
	};
	if (tevent_req_nomem(server->rpc_server_exe, req)) {
		return tevent_req_post(req, ev);
	}

	/* As np helper we only serve named pipes */
	subreq = rpc_server_get_endpoints_send(
		state,
		ev,
		rpc_server_exe,
		host->np_helper ? NCACN_NP : NCA_UNKNOWN);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
	return req;
}
1682
/*
 * Endpoint query finished: size the worker array, create and listen
 * on the endpoint sockets and publish the endpoints in epmdb.tdb.
 */
static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	struct rpc_server *server = state->server;
	int ret;
	size_t i, num_endpoints;
	bool ok;

	ret = rpc_server_get_endpoints_recv(
		subreq,
		server,
		&server->endpoints,
		&server->iface_names,
		&server->max_workers,
		&server->idle_seconds);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(ret));
		return;
	}

	server->workers = talloc_array(
		server, struct rpc_work_process, server->max_workers);
	if (tevent_req_nomem(server->workers, req)) {
		return;
	}

	for (i=0; i<server->max_workers; i++) {
		/* mark as not yet created */
		server->workers[i] = (struct rpc_work_process) { .pid=-1, };
	}

	num_endpoints = talloc_array_length(server->endpoints);

	for (i=0; i<num_endpoints; i++) {
		struct rpc_host_endpoint *e = server->endpoints[i];
		NTSTATUS status;
		size_t j;

		e->server = server;

		status = dcesrv_create_binding_sockets(
			e->binding, e, &e->num_fds, &e->fds);
		if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
			/* this transport is not available here, skip it */
			continue;
		}
		/* tevent_req_nterror() fails the request on any error */
		if (tevent_req_nterror(req, status)) {
			DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
				  nt_errstr(status));
			return;
		}

		for (j=0; j<e->num_fds; j++) {
			ret = listen(e->fds[j], 256);
			if (ret == -1) {
				tevent_req_nterror(
					req, map_nt_error_from_unix(errno));
				return;
			}
		}
	}

	/* best effort: a failed epmdb update is logged, not fatal */
	ok = rpc_host_fill_epm_db(
		server->host->epmdb, server->endpoints, server->iface_names);
	if (!ok) {
		DBG_DEBUG("rpc_host_fill_epm_db failed\n");
	}

	tevent_req_done(req);
}
1756
/*
 * Finish rpc_server_setup_send(): on success hand the fully
 * initialized struct rpc_server to the caller, reparented onto
 * mem_ctx. *server is only written on NT_STATUS_OK.
 */
static NTSTATUS rpc_server_setup_recv(
	struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
{
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	*server = talloc_move(mem_ctx, &state->server);
	tevent_req_received(req);
	return NT_STATUS_OK;
}
1773
/*
 * A rpcd_* worker died. Called from the SIGCHLD handler with the pid
 * waitpid() returned. Marks the matching worker slot free and, if we
 * run as an np_helper and no worker is left at all, terminates the
 * whole samba-dcerpcd process.
 */
static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
{
	size_t i, num_servers = talloc_array_length(host->servers);
	struct rpc_work_process *worker = NULL;
	bool found_pid = false;
	bool have_active_worker = false;

	/* Scan all workers of all servers for the pid */
	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = host->servers[i];
		size_t j, num_workers;

		if (server == NULL) {
			/* SIGCHLD for --list-interfaces run */
			continue;
		}

		num_workers = talloc_array_length(server->workers);

		for (j=0; j<num_workers; j++) {
			worker = &server->workers[j];
			if (worker->pid == pid) {
				found_pid = true;
				worker->pid = -1;
				worker->available = false;
			}

			/* Track whether any worker remains alive */
			if (worker->pid != -1) {
				have_active_worker = true;
			}
		}
	}

	if (!found_pid) {
		DBG_WARNING("No worker with PID %d\n", (int)pid);
		return;
	}

	if (!have_active_worker && host->np_helper) {
		/*
		 * We have nothing left to do as an np_helper.
		 * Terminate ourselves (samba-dcerpcd). We will
		 * be restarted on demand anyway.
		 */
		DBG_DEBUG("Exiting idle np helper\n");
		exit(0);
	}
}
1824
/*
 * SIGCHLD handler: reap all exited rpcd_* children. One signal
 * delivery may stand for several exited children, hence the
 * WNOHANG loop. Each reaped pid is handed to rpc_worker_exited()
 * for bookkeeping.
 */
static void rpc_host_sigchld(
	struct tevent_context *ev,
	struct tevent_signal *se,
	int signum,
	int count,
	void *siginfo,
	void *private_data)
{
	struct rpc_host *state = talloc_get_type_abort(
		private_data, struct rpc_host);
	pid_t pid;
	int wstatus;

	while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
		DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
		rpc_worker_exited(state, pid);
	}
}
1846
/*
 * Idle timer fired for a rpcd_* worker. Ask it to terminate via
 * MSG_SHUTDOWN. The timer "te" identifies which worker of this
 * server went idle.
 */
static void rpc_host_exit_worker(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct rpc_server *server = talloc_get_type_abort(
		private_data, struct rpc_server);
	size_t i, num_workers = talloc_array_length(server->workers);

	/*
	 * Scan for the right worker. We don't have too many of those,
	 * and maintaining an index would be more data structure effort.
	 */

	for (i=0; i<num_workers; i++) {
		struct rpc_work_process *w = &server->workers[i];
		NTSTATUS status;

		if (w->exit_timer != te) {
			continue;
		}
		w->exit_timer = NULL;

		/* The timer is only armed for workers without clients */
		SMB_ASSERT(w->num_associations == 0);

		status = messaging_send(
			server->host->msg_ctx,
			pid_to_procid(w->pid),
			MSG_SHUTDOWN,
			NULL);
		if (!NT_STATUS_IS_OK(status)) {
			/* Non-fatal, the worker will stay around */
			DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
				  nt_errstr(status));
		}

		/* Don't hand new clients to a worker we asked to exit */
		w->available = false;
		break;
	}
}
1890
/*
 * A rpcd_* worker replied with its status (MSG_RPC_WORKER_STATUS).
 * Validate server/worker indices and sender pid against our
 * bookkeeping, record the worker's load, arm or disarm its idle-exit
 * timer and try to hand out pending clients.
 */
static void rpc_host_child_status_recv(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct rpc_host *host = talloc_get_type_abort(
		private_data, struct rpc_host);
	size_t num_servers = talloc_array_length(host->servers);
	struct rpc_server *server = NULL;
	size_t num_workers;
	pid_t src_pid = procid_to_pid(&server_id);
	struct rpc_work_process *worker = NULL;
	struct rpc_worker_status status_message;
	enum ndr_err_code ndr_err;

	ndr_err = ndr_pull_struct_blob_all_noalloc(
		data,
		&status_message,
		(ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		struct server_id_buf buf;
		DBG_WARNING("Got invalid message from pid %s\n",
			    server_id_str_buf(server_id, &buf));
		return;
	}
	if (DEBUGLEVEL >= 10) {
		NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
	}

	/* Indices come from the child, treat them as untrusted */
	if (status_message.server_index >= num_servers) {
		DBG_WARNING("Got invalid server_index=%"PRIu32", "
			    "num_servers=%zu\n",
			    status_message.server_index,
			    num_servers);
		return;
	}

	server = host->servers[status_message.server_index];

	num_workers = talloc_array_length(server->workers);
	if (status_message.worker_index >= num_workers) {
		DBG_WARNING("Got invalid worker_index=%"PRIu32", "
			    "num_workers=%zu\n",
			    status_message.worker_index,
			    num_workers);
		return;
	}
	worker = &server->workers[status_message.worker_index];

	/* The sender must be the process we forked into that slot */
	if (src_pid != worker->pid) {
		DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
			    status_message.worker_index,
			    (int)src_pid,
			    worker->pid);
		return;
	}

	worker->available = true;
	worker->num_associations = status_message.num_association_groups;
	worker->num_connections = status_message.num_connections;

	if (worker->num_associations != 0) {
		/* Busy again, cancel a pending idle shutdown */
		TALLOC_FREE(worker->exit_timer);
	} else {
		worker->exit_timer = tevent_add_timer(
			messaging_tevent_context(msg),
			server->workers,
			tevent_timeval_current_ofs(server->idle_seconds, 0),
			rpc_host_exit_worker,
			server);
		/* No NULL check, it's not fatal if this does not work */
	}

	rpc_host_distribute_clients(server);
}
1971
/*
 * samba-dcerpcd has been asked to shutdown via MSG_SHUTDOWN.
 * Mark the initial tevent_req (registered as private_data in
 * rpc_host_send()) as done so we drop out of the event loop
 * in main.
 */
static void rpc_host_msg_shutdown(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	tevent_req_done(req);
}
1988
/*
 * scandir() filter callback: accept (return 1 for) only directory
 * entries whose name starts with "rpcd_", reject everything else.
 */
static int rpcd_filter(const struct dirent *d)
{
	if (fnmatch("rpcd_*", d->d_name, 0) != 0) {
		return 0;
	}
	return 1;
}
1997
1998 /*
1999  * Scan the given libexecdir for rpcd_* services
2000  * and return them as a strv list.
2001  */
2002 static int rpc_host_list_servers(
2003         const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
2004 {
2005         char *servers = NULL;
2006         struct dirent **namelist = NULL;
2007         int i, num_servers;
2008         int ret = ENOMEM;
2009
2010         num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
2011         if (num_servers == -1) {
2012                 DBG_DEBUG("scandir failed: %s\n", strerror(errno));
2013                 return errno;
2014         }
2015
2016         for (i=0; i<num_servers; i++) {
2017                 char *exe = talloc_asprintf(
2018                         mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
2019                 if (exe == NULL) {
2020                         goto fail;
2021                 }
2022
2023                 ret = strv_add(mem_ctx, &servers, exe);
2024                 TALLOC_FREE(exe);
2025                 if (ret != 0) {
2026                         goto fail;
2027                 }
2028         }
2029 fail:
2030         for (i=0; i<num_servers; i++) {
2031                 SAFE_FREE(namelist[i]);
2032         }
2033         SAFE_FREE(namelist);
2034
2035         if (ret != 0) {
2036                 TALLOC_FREE(servers);
2037                 return ret;
2038         }
2039         *pservers = servers;
2040         return 0;
2041 }
2042
/* Per-endpoint state while waiting for client connections */
struct rpc_host_endpoint_accept_state {
	struct tevent_context *ev;	/* event context we accept on */
	struct rpc_host_endpoint *endpoint; /* the endpoint being served */
};
2047
2048 static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
2049 static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
2050
/*
 * Asynchronously wait for a DCERPC connection from a client.
 * Starts one accept_send() per listening fd of the endpoint; each
 * completion re-arms itself in rpc_host_endpoint_accept_accepted(),
 * so this request only ever finishes on error.
 */
static struct tevent_req *rpc_host_endpoint_accept_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host_endpoint *endpoint)
{
	struct tevent_req *req = NULL;
	struct rpc_host_endpoint_accept_state *state = NULL;
	size_t i;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_endpoint_accept_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->endpoint = endpoint;

	for (i=0; i<endpoint->num_fds; i++) {
		struct tevent_req *subreq = NULL;

		subreq = accept_send(state, ev, endpoint->fds[i]);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_endpoint_accept_accepted, req);
	}

	return req;
}
2084
/*
 * Accept a DCERPC connection from a client: re-arm the accept on the
 * listening socket, then start reading the client's bind packet on
 * the new connection.
 */
static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	int sock, listen_sock, err;
	struct samba_sockaddr peer_addr;

	sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
	TALLOC_FREE(subreq);
	if (sock == -1) {
		/* What to do here? Just ignore the error and retry? */
		DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
		tevent_req_error(req, err);
		return;
	}

	/* Re-arm the listening socket for the next client */
	subreq = accept_send(state, state->ev, listen_sock);
	if (tevent_req_nomem(subreq, req)) {
		close(sock);
		sock = -1;
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_accepted, req);

	/* Hand the accepted connection to the bind reader */
	subreq = rpc_host_bind_read_send(
		state,
		state->ev,
		dcerpc_binding_get_transport(endpoint->binding),
		&sock,
		&peer_addr);
	if (tevent_req_nomem(subreq, req)) {
		/* NOTE(review): "sock" looks leaked on this path — confirm */
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_got_bind, req);
}
2128
/*
 * Client sent us a DCERPC bind packet. Wrap socket, client info and
 * bind packet into a pending-client record, watch the socket for
 * hangup and try to assign the client to a worker. On any error the
 * client is silently dropped.
 */
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	struct rpc_server *server = endpoint->server;
	struct rpc_host_pending_client *pending = NULL;
	struct rpc_host_client *client = NULL;
	struct ncacn_packet *bind_pkt = NULL;
	int ret;
	int sock=-1;

	ret = rpc_host_bind_read_recv(
		subreq, state, &sock, &client, &bind_pkt);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
			  strerror(ret));
		goto fail;
	}

	client->binding = dcerpc_binding_string(client, endpoint->binding);
	if (client->binding == NULL) {
		DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
		goto fail;
	}

	pending = talloc_zero(server, struct rpc_host_pending_client);
	if (pending == NULL) {
		DBG_WARNING("talloc failed, dropping client\n");
		goto fail;
	}
	pending->server = server;
	pending->sock = sock;
	pending->bind_pkt = talloc_move(pending, &bind_pkt);
	pending->client = talloc_move(pending, &client);
	/* Destructor closes pending->sock and removes the list entry */
	talloc_set_destructor(pending, rpc_host_pending_client_destructor);
	sock = -1; /* ownership moved to "pending" */

	/* Notice if the client hangs up before a worker takes over */
	pending->hangup_wait = wait_for_read_send(
		pending, state->ev, pending->sock, true);
	if (pending->hangup_wait == NULL) {
		DBG_WARNING("wait_for_read_send failed, dropping client\n");
		TALLOC_FREE(pending);
		return;
	}
	tevent_req_set_callback(
		pending->hangup_wait, rpc_host_client_exited, pending);

	DLIST_ADD_END(server->pending_clients, pending);
	rpc_host_distribute_clients(server);
	return;

fail:
	TALLOC_FREE(client);
	if (sock != -1) {
		close(sock);
	}
}
2193
/*
 * Report the error that ended the accept loop. *ep is always set so
 * the caller can log which endpoint failed. Returns an errno value.
 */
static int rpc_host_endpoint_accept_recv(
	struct tevent_req *req, struct rpc_host_endpoint **ep)
{
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);

	*ep = state->endpoint;

	return tevent_req_simple_recv_unix(req);
}
2204
/*
 * Full state for samba-dcerpcd. Everything else
 * is hung off this.
 */
struct rpc_host_state {
	struct tevent_context *ev;
	struct rpc_host *host;

	/* Set once all servers finished their setup */
	bool is_ready;
	/* Progname for daemon_ready() reporting, or NULL */
	const char *daemon_ready_progname;
	/* Schedules rpc_host_report_readiness() */
	struct tevent_immediate *ready_signal_immediate;
	/* fds to write one byte to (and close) when we are ready */
	int *ready_signal_fds;

	/* Number of rpcd_* servers we were asked to start */
	size_t num_servers;
	/* How many of them completed rpc_server_setup_send() so far */
	size_t num_prepared;
};
2221
/*
 * Tell whoever invoked samba-dcerpcd we're ready to serve: write one
 * byte to every registered ready-signal fd and close it. Runs as an
 * immediate; does nothing until is_ready has been set.
 */
static void rpc_host_report_readiness(
	struct tevent_context *ev,
	struct tevent_immediate *im,
	void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t i, num_fds = talloc_array_length(state->ready_signal_fds);

	if (!state->is_ready) {
		/* Fds stay queued until setup completes */
		DBG_DEBUG("Not yet ready\n");
		return;
	}

	for (i=0; i<num_fds; i++) {
		uint8_t byte = 0;
		ssize_t nwritten;

		/* Retry on signal interruption; other errors ignored */
		do {
			nwritten = write(
				state->ready_signal_fds[i],
				(void *)&byte,
				sizeof(byte));
		} while ((nwritten == -1) && (errno == EINTR));

		close(state->ready_signal_fds[i]);
	}

	TALLOC_FREE(state->ready_signal_fds);
}
2256
/*
 * Respond to a "are you ready" message (MSG_DAEMON_READY_FD): queue
 * the fd the sender passed and schedule rpc_host_report_readiness().
 * Always returns false so the messaging_filtered_read request keeps
 * running.
 */
static bool rpc_host_ready_signal_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t num_fds = talloc_array_length(state->ready_signal_fds);
	int *tmp = NULL;

	if (rec->msg_type != MSG_DAEMON_READY_FD) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	/* Guard against size_t overflow of the fd array length */
	if (num_fds + 1 < num_fds) {
		return false;
	}
	tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
	if (tmp == NULL) {
		return false;
	}
	state->ready_signal_fds = tmp;

	/* Take ownership of the received fd */
	state->ready_signal_fds[num_fds] = rec->fds[0];
	rec->fds[0] = -1;

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);

	return false;
}
2296
/*
 * Respond to a "what is your status" message (MSG_RPC_DUMP_STATUS):
 * print per-server worker statistics to the fd the sender passed.
 * Always returns false so the messaging_filtered_read request keeps
 * running.
 */
static bool rpc_host_dump_status_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	struct rpc_host *host = state->host;
	struct rpc_server **servers = host->servers;
	size_t i, num_servers = talloc_array_length(servers);
	FILE *f = NULL;
	int fd;

	if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	/* dup() so the messaging layer can still close rec->fds[0] */
	fd = dup(rec->fds[0]);
	if (fd == -1) {
		DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
			  rec->fds[0],
			  strerror(errno));
		return false;
	}

	f = fdopen(fd, "w");
	if (f == NULL) {
		DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
		close(fd);
		return false;
	}

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = servers[i];
		size_t j, num_workers = talloc_array_length(server->workers);
		size_t active_workers = 0;

		for (j=0; j<num_workers; j++) {
			if (server->workers[j].pid != -1) {
				active_workers += 1;
			}
		}

		fprintf(f,
			"%s: active_workers=%zu\n",
			server->rpc_server_exe,
			active_workers);

		for (j=0; j<num_workers; j++) {
			struct rpc_work_process *w = &server->workers[j];

			if (w->pid == (pid_t)-1) {
				/* Slot not in use */
				continue;
			}

			fprintf(f,
				" worker[%zu]: pid=%d, num_associations=%"PRIu32", num_connections=%"PRIu32"\n",
				j,
				(int)w->pid,
				w->num_associations,
				w->num_connections);
		}
	}

	/* fclose() also closes the dup'ed fd */
	fclose(f);

	return false;
}
2370
2371 static void rpc_host_server_setup_done(struct tevent_req *subreq);
2372 static void rpc_host_endpoint_failed(struct tevent_req *subreq);
2373
2374 /*
2375  * Async startup for samba-dcerpcd.
2376  */
2377 static struct tevent_req *rpc_host_send(
2378         TALLOC_CTX *mem_ctx,
2379         struct tevent_context *ev,
2380         struct messaging_context *msg_ctx,
2381         char *servers,
2382         int ready_signal_fd,
2383         const char *daemon_ready_progname,
2384         bool is_np_helper)
2385 {
2386         struct tevent_req *req = NULL, *subreq = NULL;
2387         struct rpc_host_state *state = NULL;
2388         struct rpc_host *host = NULL;
2389         struct tevent_signal *se = NULL;
2390         char *epmdb_path = NULL;
2391         char *exe = NULL;
2392         size_t i, num_servers = strv_count(servers);
2393         NTSTATUS status;
2394         int ret;
2395
2396         req = tevent_req_create(req, &state, struct rpc_host_state);
2397         if (req == NULL) {
2398                 return NULL;
2399         }
2400         state->ev = ev;
2401         state->daemon_ready_progname = daemon_ready_progname;
2402
2403         state->ready_signal_immediate = tevent_create_immediate(state);
2404         if (tevent_req_nomem(state->ready_signal_immediate, req)) {
2405                 return tevent_req_post(req, ev);
2406         }
2407
2408         if (ready_signal_fd != -1) {
2409                 state->ready_signal_fds = talloc_array(state, int, 1);
2410                 if (tevent_req_nomem(state->ready_signal_fds, req)) {
2411                         return tevent_req_post(req, ev);
2412                 }
2413                 state->ready_signal_fds[0] = ready_signal_fd;
2414         }
2415
2416         state->host = talloc_zero(state, struct rpc_host);
2417         if (tevent_req_nomem(state->host, req)) {
2418                 return tevent_req_post(req, ev);
2419         }
2420         host = state->host;
2421
2422         host->msg_ctx = msg_ctx;
2423         host->np_helper = is_np_helper;
2424
2425         ret = pipe(host->worker_stdin);
2426         if (ret == -1) {
2427                 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2428                 return tevent_req_post(req, ev);
2429         }
2430
2431         host->servers = talloc_zero_array(
2432                 host, struct rpc_server *, num_servers);
2433         if (tevent_req_nomem(host->servers, req)) {
2434                 return tevent_req_post(req, ev);
2435         }
2436
2437         se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
2438         if (tevent_req_nomem(se, req)) {
2439                 return tevent_req_post(req, ev);
2440         }
2441         BlockSignals(false, SIGCHLD);
2442
2443         status = messaging_register(
2444                 msg_ctx,
2445                 host,
2446                 MSG_RPC_WORKER_STATUS,
2447                 rpc_host_child_status_recv);
2448         if (tevent_req_nterror(req, status)) {
2449                 return tevent_req_post(req, ev);
2450         }
2451
2452         status = messaging_register(
2453                 msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
2454         if (tevent_req_nterror(req, status)) {
2455                 return tevent_req_post(req, ev);
2456         }
2457
2458         subreq = messaging_filtered_read_send(
2459                 state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
2460         if (tevent_req_nomem(subreq, req)) {
2461                 return tevent_req_post(req, ev);
2462         }
2463
2464         subreq = messaging_filtered_read_send(
2465                 state, ev, msg_ctx, rpc_host_dump_status_filter, state);
2466         if (tevent_req_nomem(subreq, req)) {
2467                 return tevent_req_post(req, ev);
2468         }
2469
2470         epmdb_path = lock_path(state, "epmdb.tdb");
2471         if (tevent_req_nomem(epmdb_path, req)) {
2472                 return tevent_req_post(req, ev);
2473         }
2474
2475         host->epmdb = tdb_wrap_open(
2476                 host,
2477                 epmdb_path,
2478                 0,
2479                 TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
2480                 O_RDWR|O_CREAT,
2481                 0644);
2482         if (host->epmdb == NULL) {
2483                 DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
2484                           epmdb_path,
2485                           strerror(errno));
2486                 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2487                 return tevent_req_post(req, ev);
2488         }
2489         TALLOC_FREE(epmdb_path);
2490
2491         for (exe = strv_next(servers, exe), i = 0;
2492              exe != NULL;
2493              exe = strv_next(servers, exe), i++) {
2494
2495                 DBG_DEBUG("server_setup for %s index %zu\n", exe, i);
2496
2497                 subreq = rpc_server_setup_send(
2498                         state,
2499                         ev,
2500                         host,
2501                         exe);
2502                 if (tevent_req_nomem(subreq, req)) {
2503                         return tevent_req_post(req, ev);
2504                 }
2505                 tevent_req_set_callback(
2506                         subreq, rpc_host_server_setup_done, req);
2507         }
2508
2509         return req;
2510 }
2511
/*
 * Timer function called after we were initialized but no one
 * connected. Shutdown by completing the main tevent_req, which
 * makes main drop out of its event loop.
 */
static void rpc_host_shutdown(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	DBG_DEBUG("Nobody connected -- shutting down\n");
	tevent_req_done(req);
}
2527
/*
 * One rpc_server_setup_send() completed. A failed server is dropped
 * (the servers array is shrunk) and otherwise ignored. Once all
 * servers are prepared, start accepting on every endpoint, report
 * readiness and -- as np_helper -- arm the no-client shutdown timer.
 */
static void rpc_host_server_setup_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_server *server = NULL;
	struct rpc_host *host = state->host;
	size_t i, num_servers = talloc_array_length(host->servers);
	NTSTATUS status;

	status = rpc_server_setup_recv(subreq, host, &server);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
			  nt_errstr(status));
		/* Shrink the array so num_prepared can still reach it */
		host->servers = talloc_realloc(
			host,
			host->servers,
			struct rpc_server *,
			num_servers-1);
		return;
	}

	server->server_index = state->num_prepared;
	host->servers[state->num_prepared] = server;

	state->num_prepared += 1;

	if (state->num_prepared < num_servers) {
		/* Wait for the remaining servers */
		return;
	}

	/* All servers set up, start accepting clients everywhere */
	for (i=0; i<num_servers; i++) {
		size_t j, num_endpoints;

		server = host->servers[i];
		num_endpoints = talloc_array_length(server->endpoints);

		for (j=0; j<num_endpoints; j++) {
			subreq = rpc_host_endpoint_accept_send(
				state, state->ev, server->endpoints[j]);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(
				subreq, rpc_host_endpoint_failed, req);
		}
	}

	state->is_ready = true;

	if (state->daemon_ready_progname != NULL) {
		daemon_ready(state->daemon_ready_progname);
	}

	if (host->np_helper) {
		/*
		 * If we're started as an np helper, and no one talks to
		 * us within 10 seconds, just shut ourselves down.
		 */
		host->np_helper_shutdown = tevent_add_timer(
			state->ev,
			state,
			timeval_current_ofs(10, 0),
			rpc_host_shutdown,
			req);
		if (tevent_req_nomem(host->np_helper_shutdown, req)) {
			return;
		}
	}

	/* Flush any queued ready-signal fds */
	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);
}
2606
2607 /*
2608  * Log accept fail on an endpoint.
2609  */
2610 static void rpc_host_endpoint_failed(struct tevent_req *subreq)
2611 {
2612         struct tevent_req *req = tevent_req_callback_data(
2613                 subreq, struct tevent_req);
2614         struct rpc_host_state *state = tevent_req_data(
2615                 req, struct rpc_host_state);
2616         struct rpc_host_endpoint *endpoint = NULL;
2617         char *binding_string = NULL;
2618         int ret;
2619
2620         ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
2621         TALLOC_FREE(subreq);
2622
2623         binding_string = dcerpc_binding_string(state, endpoint->binding);
2624         DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
2625                   binding_string,
2626                   strerror(ret));
2627         TALLOC_FREE(binding_string);
2628 }
2629
2630 static NTSTATUS rpc_host_recv(struct tevent_req *req)
2631 {
2632         return tevent_req_simple_recv_ntstatus(req);
2633 }
2634
2635 static int rpc_host_pidfile_create(
2636         struct messaging_context *msg_ctx,
2637         const char *progname,
2638         int ready_signal_fd)
2639 {
2640         const char *piddir = lp_pid_directory();
2641         size_t len = strlen(piddir) + strlen(progname) + 6;
2642         char pidFile[len];
2643         pid_t existing_pid;
2644         int fd, ret;
2645
2646         snprintf(pidFile,
2647                  sizeof(pidFile),
2648                  "%s/%s.pid",
2649                  piddir, progname);
2650
2651         ret = pidfile_path_create(pidFile, &fd, &existing_pid);
2652         if (ret == 0) {
2653                 /* leak fd */
2654                 return 0;
2655         }
2656
2657         if (ret != EAGAIN) {
2658                 DBG_DEBUG("pidfile_path_create() failed: %s\n",
2659                           strerror(ret));
2660                 return ret;
2661         }
2662
2663         DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);
2664
2665         if (ready_signal_fd != -1) {
2666                 NTSTATUS status = messaging_send_iov(
2667                         msg_ctx,
2668                         pid_to_procid(existing_pid),
2669                         MSG_DAEMON_READY_FD,
2670                         NULL,
2671                         0,
2672                         &ready_signal_fd,
2673                         1);
2674                 if (!NT_STATUS_IS_OK(status)) {
2675                         DBG_DEBUG("Could not send ready_signal_fd: %s\n",
2676                                   nt_errstr(status));
2677                 }
2678         }
2679
2680         return EAGAIN;
2681 }
2682
2683 static void samba_dcerpcd_stdin_handler(
2684         struct tevent_context *ev,
2685         struct tevent_fd *fde,
2686         uint16_t flags,
2687         void *private_data)
2688 {
2689         struct tevent_req *req = talloc_get_type_abort(
2690                 private_data, struct tevent_req);
2691         char c;
2692
2693         if (read(0, &c, 1) != 1) {
2694                 /* we have reached EOF on stdin, which means the
2695                    parent has exited. Shutdown the server */
2696                 tevent_req_done(req);
2697         }
2698 }
2699
/*
 * samba-dcerpcd microservice startup!
 *
 * Parse options, set up logging, messaging and the pidfile, then run
 * the rpc_host tevent request until shutdown.
 */
int main(int argc, const char *argv[])
{
	const struct loadparm_substitution *lp_sub =
		loadparm_s3_global_substitution();
	const char *progname = getprogname();
	TALLOC_CTX *frame = NULL;
	struct tevent_context *ev_ctx = NULL;
	struct messaging_context *msg_ctx = NULL;
	struct tevent_req *req = NULL;
	char *servers = NULL;	/* strv list of rpcd services to run */
	const char *arg = NULL;
	size_t num_servers;
	poptContext pc;
	int ret, err;
	NTSTATUS status;
	bool log_stdout;
	bool ok;

	/* popt option targets */
	int libexec_rpcds = 0;
	int np_helper = 0;
	int ready_signal_fd = -1;

	struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
	struct poptOption long_options[] = {
		POPT_AUTOHELP
		{
			.longName   = "libexec-rpcds",
			.argInfo    = POPT_ARG_NONE,
			.arg        = &libexec_rpcds,
			.descrip    = "Use all rpcds in libexec",
		},
		{
			.longName   = "ready-signal-fd",
			.argInfo    = POPT_ARG_INT,
			.arg        = &ready_signal_fd,
			.descrip    = "fd to close when initialized",
		},
		{
			.longName   = "np-helper",
			.argInfo    = POPT_ARG_NONE,
			.arg        = &np_helper,
			.descrip    = "Internal named pipe server",
		},
		POPT_COMMON_SAMBA
		POPT_COMMON_DAEMON
		POPT_COMMON_VERSION
		POPT_TABLEEND
	};

	{
		/*
		 * Close all inherited fds above stderr, except any fd
		 * passed on the command line via --ready-signal-fd.
		 */
		const char *fd_params[] = { "ready-signal-fd", };

		closefrom_except_fd_params(
			3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
	}

	/* Basic process setup before any option parsing. */
	talloc_enable_null_tracking();
	frame = talloc_stackframe();
	umask(0);
	sec_init();
	smb_init_locale();

	ok = samba_cmdline_init(frame,
				SAMBA_CMDLINE_CONFIG_SERVER,
				true /* require_smbconf */);
	if (!ok) {
		DBG_ERR("Failed to init cmdline parser!\n");
		TALLOC_FREE(frame);
		exit(ENOMEM);
	}

	pc = samba_popt_get_context(getprogname(),
				    argc,
				    argv,
				    long_options,
				    0);
	if (pc == NULL) {
		DBG_ERR("Failed to setup popt context!\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	poptSetOtherOptionHelp(
		pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");

	ret = poptGetNextOpt(pc);

	if (ret != -1) {
		/*
		 * -1 means "all options consumed"; anything else is
		 * an error: a stray option value (>=0), an unknown
		 * option, or another popt failure.
		 */
		if (ret >= 0) {
			fprintf(stderr,
				"\nGot unexpected option %d\n",
				ret);
		} else if (ret == POPT_ERROR_BADOPT) {
			fprintf(stderr,
				"\nInvalid option %s: %s\n\n",
				poptBadOption(pc, 0),
				poptStrerror(ret));
		} else {
			fprintf(stderr,
				"\npoptGetNextOpt returned %s\n",
				poptStrerror(ret));
		}

		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	/*
	 * Remaining positional arguments name the rpcd services to
	 * run; collect them into the "servers" strv.
	 */
	while ((arg = poptGetArg(pc)) != NULL) {
		ret = strv_add(frame, &servers, arg);
		if (ret != 0) {
			DBG_ERR("strv_add() failed\n");
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
	if (log_stdout) {
		setup_logging(progname, DEBUG_STDOUT);
	} else {
		setup_logging(progname, DEBUG_FILE);
	}

	/*
	 * If "rpc start on demand helpers = true" in smb.conf we must
	 * not start as standalone, only on demand from
	 * local_np_connect() functions. Log an error message telling
	 * the admin how to fix and then exit.
	 */
	if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
		DBG_ERR("Cannot start in standalone mode if smb.conf "
			"[global] setting "
			"\"rpc start on demand helpers = true\" - "
			"exiting\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	/*
	 * --libexec-rpcds: add every rpcd found in the libexec
	 * directory to the list of services (see the comment at the
	 * top of this file).
	 */
	if (libexec_rpcds != 0) {
		ret = rpc_host_list_servers(
			dyn_SAMBA_LIBEXECDIR, frame, &servers);
		if (ret != 0) {
			DBG_ERR("Could not list libexec: %s\n",
				strerror(ret));
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	num_servers = strv_count(servers);
	if (num_servers == 0) {
		/* Nothing to serve: neither args nor --libexec-rpcds */
		poptPrintUsage(pc, stderr, 0);
		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	poptFreeContext(pc);

	cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();

	if (log_stdout && cmdline_daemon_cfg->fork) {
		DBG_ERR("Can't log to stdout unless in foreground\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	msg_ctx = global_messaging_context();
	if (msg_ctx == NULL) {
		DBG_ERR("messaging_init() failed\n");
		TALLOC_FREE(frame);
		exit(1);
	}
	ev_ctx = messaging_tevent_context(msg_ctx);

	if (cmdline_daemon_cfg->fork) {
		become_daemon(
			true,
			cmdline_daemon_cfg->no_process_group,
			log_stdout);

		status = reinit_after_fork(msg_ctx, ev_ctx, false);
		if (!NT_STATUS_IS_OK(status)) {
			exit_daemon("reinit_after_fork() failed",
				    map_errno_from_nt_status(status));
		}
	} else {
		DBG_DEBUG("Calling daemon_status\n");
		daemon_status(progname, "Starting process ... ");
	}

	BlockSignals(true, SIGPIPE);

	dump_core_setup(progname, lp_logfile(frame, lp_sub));

	reopen_logs();

	DEBUG(0, ("%s version %s started.\n",
		  progname,
		  samba_version_string()));
	DEBUGADD(0,("%s\n", COPYRIGHT_STARTUP_MESSAGE));

	/*
	 * NOTE(review): winbind calls are switched off around
	 * init_guest_session_info() — presumably to avoid contacting
	 * winbindd during startup; confirm against the helper's
	 * implementation.
	 */
	(void)winbind_off();
	ok = init_guest_session_info(frame);
	(void)winbind_on();
	if (!ok) {
		DBG_ERR("init_guest_session_info failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	/*
	 * Exit if the pidfile could not be created or another
	 * instance already holds it (EAGAIN, after forwarding
	 * ready_signal_fd to the existing instance).
	 */
	ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
			  strerror(ret));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	/* Kick off the toplevel rpc_host state machine. */
	req = rpc_host_send(
		ev_ctx,
		ev_ctx,
		msg_ctx,
		servers,
		ready_signal_fd,
		cmdline_daemon_cfg->fork ? NULL : progname,
		np_helper != 0);
	if (req == NULL) {
		DBG_ERR("rpc_host_send failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	if (!cmdline_daemon_cfg->fork) {
		/*
		 * In foreground mode, if stdin is a pipe or socket a
		 * parent process controls us: watch it so we shut
		 * down when the parent closes it.
		 */
		struct stat st;
		if (fstat(0, &st) != 0) {
			DBG_DEBUG("fstat(0) failed: %s\n",
				  strerror(errno));
			global_messaging_context_free();
			TALLOC_FREE(frame);
			exit(1);
		}
		if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
			tevent_add_fd(
				ev_ctx,
				ev_ctx,
				0,
				TEVENT_FD_READ,
				samba_dcerpcd_stdin_handler,
				req);
		}
	}

	/* Run the event loop until the rpc_host request completes. */
	ok = tevent_req_poll_unix(req, ev_ctx, &err);
	if (!ok) {
		DBG_ERR("tevent_req_poll_unix failed: %s\n",
			strerror(err));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	status = rpc_host_recv(req);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	TALLOC_FREE(frame);

	return 0;
}