7c5785f3a3bbe58b375a6a2b10224853839eb0ad
[samba.git] / source3 / rpc_server / rpc_host.c
1 /*
2  *  RPC host
3  *
4  *  Implements samba-dcerpcd service.
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 /*
21  * This binary has two usage modes:
22  *
23  * In the normal case when invoked from smbd or winbind it is given a
24  * directory to scan via --libexec-rpcds and will invoke on demand any
25  * binaries it finds there starting with rpcd_ when a named pipe
26  * connection is requested.
27  *
28  * In the second mode it can be started explicitly from system startup
29  * scripts.
30  *
31  * When Samba is set up as an Active Directory Domain Controller the
32  * normal samba binary overrides and provides DCERPC services, whilst
33  * allowing samba-dcerpcd to provide the services that smbd used to
34  * provide in that set-up, such as SRVSVC.
35  *
36  * The second mode can also be useful for use outside of the Samba framework,
37  * for example, use with the Linux kernel SMB2 server ksmbd. In this mode
38  * it behaves like inetd and listens on sockets on behalf of RPC server
39  * implementations.
40  */
41
42 #include "replace.h"
43 #include <fnmatch.h>
44 #include "lib/cmdline/cmdline.h"
45 #include "lib/cmdline/closefrom_except.h"
46 #include "source3/include/includes.h"
47 #include "source3/include/auth.h"
48 #include "rpc_sock_helper.h"
49 #include "messages.h"
50 #include "lib/util_file.h"
51 #include "lib/util/tevent_unix.h"
52 #include "lib/util/tevent_ntstatus.h"
53 #include "lib/util/smb_strtox.h"
54 #include "lib/util/debug.h"
55 #include "lib/util/server_id.h"
56 #include "lib/util/util_tdb.h"
57 #include "lib/util/util_file.h"
58 #include "lib/tdb_wrap/tdb_wrap.h"
59 #include "lib/async_req/async_sock.h"
60 #include "librpc/rpc/dcerpc_util.h"
61 #include "lib/tsocket/tsocket.h"
62 #include "libcli/named_pipe_auth/npa_tstream.h"
63 #include "librpc/gen_ndr/ndr_rpc_host.h"
64 #include "source3/param/loadparm.h"
65 #include "source3/lib/global_contexts.h"
66 #include "lib/util/strv.h"
67 #include "lib/util/pidfile.h"
68 #include "source3/rpc_client/cli_pipe.h"
69 #include "librpc/gen_ndr/ndr_epmapper.h"
70 #include "librpc/gen_ndr/ndr_epmapper_c.h"
71 #include "nsswitch/winbind_client.h"
72 #include "libcli/security/dom_sid.h"
73 #include "libcli/security/security_token.h"
74
75 extern bool override_logfile;
76
77 struct rpc_server;
78 struct rpc_work_process;
79
/*
 * samba-dcerpcd state to keep track of rpcd_* servers.
 */
struct rpc_host {
	struct messaging_context *msg_ctx;
	/* Talloc array, one entry per rpcd_* executable found */
	struct rpc_server **servers;
	/* Endpoint mapper database (see ndr_epmapper includes) */
	struct tdb_wrap *epmdb;

	/* fd pair shared with workers; presumably the read end becomes
	 * the workers' stdin — TODO confirm against the fork code */
	int worker_stdin[2];

	/* True when started with --np-helper (on-demand from smbd/winbind) */
	bool np_helper;

	/*
	 * If we're started with --np-helper but nobody contacts us,
	 * we need to exit after a while. This will be deleted once
	 * the first real client connects and our self-exit mechanism
	 * when we don't have any worker processes left kicks in.
	 */
	struct tevent_timer *np_helper_shutdown;
};
100
/*
 * Map a RPC interface to a name. Used when filling the endpoint
 * mapper database
 */
struct rpc_host_iface_name {
	/* Interface id as parsed from an rpcd --list-interfaces line */
	struct ndr_syntax_id iface;
	/* Annotation following the syntax id, e.g. "winreg" */
	char *name;
};
109
/*
 * rpc_host representation for listening sockets. ncacn_ip_tcp might
 * listen on multiple explicit IPs, all with the same port.
 */
struct rpc_host_endpoint {
	/* Back pointer to the rpcd_* server owning this endpoint */
	struct rpc_server *server;
	/* Parsed binding string this endpoint listens on */
	struct dcerpc_binding *binding;
	/* Talloc array of interfaces offered on this endpoint */
	struct ndr_syntax_id *interfaces;
	/* Listening sockets; multiple for multi-IP ncacn_ip_tcp */
	int *fds;
	size_t num_fds;
};
121
/*
 * Staging area until we sent the socket plus bind to the helper
 */
struct rpc_host_pending_client {
	/* Doubly-linked list membership (DLIST-style prev/next) */
	struct rpc_host_pending_client *prev, *next;

	/*
	 * Pointer for the destructor to remove us from the list of
	 * pending clients
	 */
	struct rpc_server *server;

	/*
	 * Waiter for client exit before a helper accepted the request
	 */
	struct tevent_req *hangup_wait;

	/*
	 * Info to pick the worker
	 */
	struct ncacn_packet *bind_pkt;

	/*
	 * This is what we send down to the worker
	 */
	int sock;			/* the accepted client socket */
	struct rpc_host_client *client;
};
150
/*
 * Representation of one worker process. For each rpcd_* executable
 * there will be more than one of these.
 */
struct rpc_work_process {
	pid_t pid;

	/*
	 * !available means:
	 *
	 * Worker forked but did not send its initial status yet (not
	 * yet initialized)
	 *
	 * Worker died, but we did not receive SIGCHLD yet. We noticed
	 * it because we couldn't send it a message.
	 */
	bool available;

	/*
	 * Incremented by us when sending a client, decremented by
	 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
	 * client exits.
	 */
	uint32_t num_associations;
	uint32_t num_connections;

	/*
	 * Send SHUTDOWN to an idle child after a while
	 */
	struct tevent_timer *exit_timer;
};
182
/*
 * State for a set of running instances of an rpcd_* server executable
 */
struct rpc_server {
	/* Back pointer to the owning samba-dcerpcd state */
	struct rpc_host *host;
	/*
	 * Index into the rpc_host_state->servers array
	 */
	uint32_t server_index;

	/* Path of the rpcd_* binary this state manages */
	const char *rpc_server_exe;

	/* Endpoints and interface names reported via --list-interfaces */
	struct rpc_host_endpoint **endpoints;
	struct rpc_host_iface_name *iface_names;

	/* Limits reported by the rpcd helper (first two output lines) */
	size_t max_workers;
	size_t idle_seconds;

	/*
	 * "workers" can be larger than "max_workers": Internal
	 * connections require an idle worker to avoid deadlocks
	 * between RPC servers: netlogon requires samr, everybody
	 * requires winreg. And if a deep call in netlogon asks for a
	 * samr connection, this must never end up in the same
	 * process. named_pipe_auth_req_info8->need_idle_server is set
	 * in those cases.
	 */
	struct rpc_work_process *workers;

	/* Clients waiting for a worker to accept them */
	struct rpc_host_pending_client *pending_clients;
};
214
/*
 * In-flight state for rpc_server_get_endpoints_send/recv: run an
 * rpcd_* helper with --list-interfaces and parse its output.
 */
struct rpc_server_get_endpoints_state {
	/* argv for the helper: exe --list-interfaces --configfile=... */
	char **argl;
	/* Default ncalrpc socket name: basename of the helper binary */
	char *ncalrpc_endpoint;
	/* Only accept endpoints of this transport; NCA_UNKNOWN: accept all */
	enum dcerpc_transport_t only_transport;

	/* Parsed results, handed out by _recv() */
	struct rpc_host_iface_name *iface_names;
	struct rpc_host_endpoint **endpoints;

	/* Parsed from the first two lines of the helper output */
	unsigned long num_workers;
	unsigned long idle_seconds;
};
226
227 static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
228
229 /**
230  * @brief Query interfaces from an rpcd helper
231  *
232  * Spawn a rpcd helper, ask it for the interfaces it serves via
233  * --list-interfaces, parse the output
234  *
235  * @param[in] mem_ctx Memory context for the tevent_req
236  * @param[in] ev Event context to run this on
237  * @param[in] rpc_server_exe Binary to ask with --list-interfaces
238  * @param[in] only_transport Filter out anything but this
239  * @return The tevent_req representing this process
240  */
241
242 static struct tevent_req *rpc_server_get_endpoints_send(
243         TALLOC_CTX *mem_ctx,
244         struct tevent_context *ev,
245         const char *rpc_server_exe,
246         enum dcerpc_transport_t only_transport)
247 {
248         struct tevent_req *req = NULL, *subreq = NULL;
249         struct rpc_server_get_endpoints_state *state = NULL;
250         const char *progname = NULL;
251
252         req = tevent_req_create(
253                 mem_ctx, &state, struct rpc_server_get_endpoints_state);
254         if (req == NULL) {
255                 return NULL;
256         }
257         state->only_transport = only_transport;
258
259         progname = strrchr(rpc_server_exe, '/');
260         if (progname != NULL) {
261                 progname += 1;
262         } else {
263                 progname = rpc_server_exe;
264         }
265
266         state->ncalrpc_endpoint = talloc_strdup(state, progname);
267         if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
268                 return tevent_req_post(req, ev);
269         }
270
271         state->argl = talloc_array(state, char *, 4);
272         if (tevent_req_nomem(state->argl, req)) {
273                 return tevent_req_post(req, ev);
274         }
275
276         state->argl = str_list_make_empty(state);
277         str_list_add_printf(&state->argl, "%s", rpc_server_exe);
278         str_list_add_printf(&state->argl, "--list-interfaces");
279         str_list_add_printf(
280                 &state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
281
282         if (tevent_req_nomem(state->argl, req)) {
283                 return tevent_req_post(req, ev);
284         }
285
286         subreq = file_ploadv_send(state, ev, state->argl, 65536);
287         if (tevent_req_nomem(subreq, req)) {
288                 return tevent_req_post(req, ev);
289         }
290         tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
291         return req;
292 }
293
294 /*
295  * Parse a line of format
296  *
297  * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
298  *
299  * and add it to the "piface_names" array.
300  */
301
302 static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
303         TALLOC_CTX *mem_ctx,
304         struct rpc_host_iface_name **piface_names,
305         const char *line)
306 {
307         struct rpc_host_iface_name *iface_names = *piface_names;
308         struct rpc_host_iface_name *tmp = NULL, *result = NULL;
309         size_t i, num_ifaces = talloc_array_length(iface_names);
310         struct ndr_syntax_id iface;
311         char *name = NULL;
312         bool ok;
313
314         ok = ndr_syntax_id_from_string(line, &iface);
315         if (!ok) {
316                 DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
317                             line);
318                 return NULL;
319         }
320
321         name = strchr(line, ' ');
322         if (name == NULL) {
323                 return NULL;
324         }
325         name += 1;
326
327         for (i=0; i<num_ifaces; i++) {
328                 result = &iface_names[i];
329
330                 if (ndr_syntax_id_equal(&result->iface, &iface)) {
331                         return result;
332                 }
333         }
334
335         if (num_ifaces + 1 < num_ifaces) {
336                 return NULL;
337         }
338
339         name = talloc_strdup(mem_ctx, name);
340         if (name == NULL) {
341                 return NULL;
342         }
343
344         tmp = talloc_realloc(
345                 mem_ctx,
346                 iface_names,
347                 struct rpc_host_iface_name,
348                 num_ifaces + 1);
349         if (tmp == NULL) {
350                 TALLOC_FREE(name);
351                 return NULL;
352         }
353         iface_names = tmp;
354
355         result = &iface_names[num_ifaces];
356
357         *result = (struct rpc_host_iface_name) {
358                 .iface = iface,
359                 .name = talloc_move(iface_names, &name),
360         };
361
362         *piface_names = iface_names;
363
364         return result;
365 }
366
367 static struct rpc_host_iface_name *rpc_host_iface_names_find(
368         struct rpc_host_iface_name *iface_names,
369         const struct ndr_syntax_id *iface)
370 {
371         size_t i, num_iface_names = talloc_array_length(iface_names);
372
373         for (i=0; i<num_iface_names; i++) {
374                 struct rpc_host_iface_name *iface_name = &iface_names[i];
375
376                 if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
377                         return iface_name;
378                 }
379         }
380
381         return NULL;
382 }
383
384 static bool dcerpc_binding_same_endpoint(
385         const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
386 {
387         enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
388         enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
389         const char *e1 = NULL, *e2 = NULL;
390         int cmp;
391
392         if (t1 != t2) {
393                 return false;
394         }
395
396         e1 = dcerpc_binding_get_string_option(b1, "endpoint");
397         e2 = dcerpc_binding_get_string_option(b2, "endpoint");
398
399         if ((e1 == NULL) && (e2 == NULL)) {
400                 return true;
401         }
402         if ((e1 == NULL) || (e2 == NULL)) {
403                 return false;
404         }
405         cmp = strcmp(e1, e2);
406         return (cmp == 0);
407 }
408
409 /**
410  * @brief Filter whether we want to serve an endpoint
411  *
412  * samba-dcerpcd might want to serve all endpoints a rpcd reported to
413  * us via --list-interfaces.
414  *
415  * In member mode, we only serve named pipes. Indicated by NCACN_NP
416  * passed in via "only_transport".
417  *
418  * @param[in] binding Which binding is in question?
419  * @param[in] only_transport Exclusive transport to serve
420  * @return Do we want to serve "binding" from samba-dcerpcd?
421  */
422
423 static bool rpc_host_serve_endpoint(
424         struct dcerpc_binding *binding,
425         enum dcerpc_transport_t only_transport)
426 {
427         enum dcerpc_transport_t transport =
428                 dcerpc_binding_get_transport(binding);
429
430         if (only_transport == NCA_UNKNOWN) {
431                 /* no filter around */
432                 return true;
433         }
434
435         if (transport != only_transport) {
436                 /* filter out */
437                 return false;
438         }
439
440         return true;
441 }
442
/*
 * Find or create the rpc_host_endpoint for "binding_string" in
 * state->endpoints. Endpoints comparing equal by transport plus
 * "endpoint" option are shared. Returns NULL when the binding cannot
 * be parsed, is filtered out by state->only_transport, or on
 * allocation failure.
 */
static struct rpc_host_endpoint *rpc_host_endpoint_find(
	struct rpc_server_get_endpoints_state *state,
	const char *binding_string)
{
	size_t i, num_endpoints = talloc_array_length(state->endpoints);
	struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
	enum dcerpc_transport_t transport;
	NTSTATUS status;
	bool serve_this;

	/* Allocate on "state" first; talloc_move'd below on success */
	ep = talloc_zero(state, struct rpc_host_endpoint);
	if (ep == NULL) {
		goto fail;
	}

	status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
			  binding_string,
			  nt_errstr(status));
		goto fail;
	}

	/* Drop endpoints excluded by the only_transport filter */
	serve_this = rpc_host_serve_endpoint(
		ep->binding, state->only_transport);
	if (!serve_this) {
		goto fail;
	}

	transport = dcerpc_binding_get_transport(ep->binding);

	if (transport == NCALRPC) {
		const char *ncalrpc_sock = dcerpc_binding_get_string_option(
			ep->binding, "endpoint");

		if (ncalrpc_sock == NULL) {
			/*
			 * generic ncalrpc:, set program-specific
			 * socket name. epmapper will redirect clients
			 * properly.
			 */
			status = dcerpc_binding_set_string_option(
				ep->binding,
				"endpoint",
				state->ncalrpc_endpoint);
			if (!NT_STATUS_IS_OK(status)) {
				DBG_DEBUG("dcerpc_binding_set_string_option "
					  "failed: %s\n",
					  nt_errstr(status));
				goto fail;
			}
		}
	}

	/* Reuse an existing endpoint with same transport+endpoint */
	for (i=0; i<num_endpoints; i++) {

		bool ok = dcerpc_binding_same_endpoint(
			ep->binding, state->endpoints[i]->binding);

		if (ok) {
			TALLOC_FREE(ep);
			return state->endpoints[i];
		}
	}

	/* Guard against size_t overflow of the array length */
	if (num_endpoints + 1 < num_endpoints) {
		goto fail;
	}

	tmp = talloc_realloc(
		state,
		state->endpoints,
		struct rpc_host_endpoint *,
		num_endpoints + 1);
	if (tmp == NULL) {
		goto fail;
	}
	state->endpoints = tmp;
	/* Transfer ownership of "ep" into the array */
	state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);

	return state->endpoints[num_endpoints];
fail:
	TALLOC_FREE(ep);
	return NULL;
}
528
529 static bool ndr_interfaces_add_unique(
530         TALLOC_CTX *mem_ctx,
531         struct ndr_syntax_id **pifaces,
532         const struct ndr_syntax_id *iface)
533 {
534         struct ndr_syntax_id *ifaces = *pifaces;
535         size_t i, num_ifaces = talloc_array_length(ifaces);
536
537         for (i=0; i<num_ifaces; i++) {
538                 if (ndr_syntax_id_equal(iface, &ifaces[i])) {
539                         return true;
540                 }
541         }
542
543         if (num_ifaces + 1 < num_ifaces) {
544                 return false;
545         }
546         ifaces = talloc_realloc(
547                 mem_ctx,
548                 ifaces,
549                 struct ndr_syntax_id,
550                 num_ifaces + 1);
551         if (ifaces == NULL) {
552                 return false;
553         }
554         ifaces[num_ifaces] = *iface;
555
556         *pifaces = ifaces;
557         return true;
558 }
559
/*
 * Read the text reply from the rpcd_* process telling us what
 * endpoints it will serve when asked with --list-interfaces.
 *
 * Expected format:
 *   line 0:  number of workers
 *   line 1:  idle seconds
 *   then, per interface, one unindented "syntax-id name" line
 *   followed by space-indented binding-string lines for its
 *   endpoints.
 */
static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	struct rpc_host_iface_name *iface = NULL;
	uint8_t *buf = NULL;
	size_t buflen;
	char **lines = NULL;
	int ret, i, num_lines;

	ret = file_ploadv_recv(subreq, state, &buf);
	TALLOC_FREE(subreq);
	if (tevent_req_error(req, ret)) {
		return;
	}

	/* Empty output: helper serves nothing, that is not an error */
	buflen = talloc_get_size(buf);
	if (buflen == 0) {
		tevent_req_done(req);
		return;
	}

	lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
	if (tevent_req_nomem(lines, req)) {
		return;
	}

	if (num_lines < 2) {
		DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
		tevent_req_error(req, EINVAL);
		return;
	}

	state->num_workers = smb_strtoul(
		lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse num_workers(%s): %s\n",
			  lines[0],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}
	/*
	 * We need to limit the number of workers in order
	 * to put the worker index into a 16-bit space,
	 * in order to use a 16-bit association group space
	 * per worker.
	 */
	if (state->num_workers > 65536) {
		state->num_workers = 65536;
	}

	state->idle_seconds = smb_strtoul(
		lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
			  lines[1],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
		  state->num_workers,
		  state->idle_seconds,
		  state->argl[0]);

	/* Remaining lines: interface headers and indented endpoints */
	for (i=2; i<num_lines; i++) {
		char *line = lines[i];
		struct rpc_host_endpoint *endpoint = NULL;
		bool ok;

		if (line[0] != ' ') {
			/* Unindented: a new "syntax-id name" header */
			iface = rpc_exe_parse_iface_line(
				state, &state->iface_names, line);
			if (iface == NULL) {
				DBG_WARNING(
					"rpc_exe_parse_iface_line failed "
					"for: [%s] from %s\n",
					line,
					state->argl[0]);
				tevent_req_oom(req);
				return;
			}
			continue;
		}

		/* Indented endpoint lines must follow a header */
		if (iface == NULL) {
			DBG_DEBUG("Interface GUID line missing\n");
			tevent_req_error(req, EINVAL);
			return;
		}

		/* Unparsable/filtered endpoints are skipped, not fatal */
		endpoint = rpc_host_endpoint_find(state, line+1);
		if (endpoint == NULL) {
			DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
				  line+1);
			continue;
		}

		ok = ndr_interfaces_add_unique(
			endpoint,
			&endpoint->interfaces,
			&iface->iface);
		if (!ok) {
			DBG_DEBUG("ndr_interfaces_add_unique failed\n");
			tevent_req_oom(req);
			return;
		}
	}

	tevent_req_done(req);
}
679
680 /**
681  * @brief Receive output from --list-interfaces
682  *
683  * @param[in] req The async req that just finished
684  * @param[in] mem_ctx Where to put the output on
685  * @param[out] endpoints The endpoints to be listened on
686  * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
687  * @return 0/errno
688  */
689 static int rpc_server_get_endpoints_recv(
690         struct tevent_req *req,
691         TALLOC_CTX *mem_ctx,
692         struct rpc_host_endpoint ***endpoints,
693         struct rpc_host_iface_name **iface_names,
694         size_t *num_workers,
695         size_t *idle_seconds)
696 {
697         struct rpc_server_get_endpoints_state *state = tevent_req_data(
698                 req, struct rpc_server_get_endpoints_state);
699         int err;
700
701         if (tevent_req_is_unix_error(req, &err)) {
702                 tevent_req_received(req);
703                 return err;
704         }
705
706         *endpoints = talloc_move(mem_ctx, &state->endpoints);
707         *iface_names = talloc_move(mem_ctx, &state->iface_names);
708         *num_workers = state->num_workers;
709         *idle_seconds = state->idle_seconds;
710         tevent_req_received(req);
711         return 0;
712 }
713
/*
 * For NCACN_NP we get the named pipe auth info from smbd, if a client
 * comes in via TCP or NCALRPC we need to invent it ourselves with
 * anonymous session info.
 */

static NTSTATUS rpc_host_generate_npa_info8_from_sock(
	TALLOC_CTX *mem_ctx,
	enum dcerpc_transport_t transport,
	int sock,
	const struct samba_sockaddr *peer_addr,
	struct named_pipe_auth_req_info8 **pinfo8)
{
	struct named_pipe_auth_req_info8 *info8 = NULL;
	struct samba_sockaddr local_addr = {
		.sa_socklen = sizeof(struct sockaddr_storage),
	};
	struct tsocket_address *taddr = NULL;
	char *remote_client_name = NULL;
	char *remote_client_addr = NULL;
	char *local_server_name = NULL;
	char *local_server_addr = NULL;
	/* Address-to-string formatter, picked per transport below */
	char *(*tsocket_address_to_name_fn)(
		const struct tsocket_address *addr,
		TALLOC_CTX *mem_ctx) = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	int ret;

	/*
	 * For NCACN_NP we get the npa info from smbd
	 */
	SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));

	/* TCP peers format as IP addresses, local ones as unix paths */
	tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
		tsocket_address_inet_addr_string : tsocket_address_unix_path;

	info8 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info8);
	if (info8 == NULL) {
		goto fail;
	}
	info8->session_info =
		talloc_zero(info8, struct auth_session_info_transport);
	if (info8->session_info == NULL) {
		goto fail;
	}

	/* TCP/ncalrpc clients start out anonymous */
	status = make_session_info_anonymous(
		info8->session_info,
		&info8->session_info->session_info);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("make_session_info_anonymous failed: %s\n",
			  nt_errstr(status));
		goto fail;
	}

	/* Fill in the remote (client) side */
	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      peer_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	remote_client_addr = tsocket_address_to_name_fn(taddr, info8);
	if (remote_client_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	/* No reverse lookup here: name is just a copy of the address */
	remote_client_name = talloc_strdup(info8, remote_client_addr);
	if (remote_client_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(peer_addr,
						  &info8->remote_client_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	/* Fill in the local (server) side from the socket itself */
	ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      &local_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	local_server_addr = tsocket_address_to_name_fn(taddr, info8);
	if (local_server_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	local_server_name = talloc_strdup(info8, local_server_addr);
	if (local_server_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(&local_addr,
						  &info8->local_server_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	if (transport == NCALRPC) {
		uid_t uid;
		gid_t gid;

		ret = getpeereid(sock, &uid, &gid);
		if (ret < 0) {
			status = map_nt_error_from_unix(errno);
			DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
			goto fail;
		}

		if (uid == sec_initial_uid()) {

			/*
			 * Indicate "root" to gensec
			 */

			TALLOC_FREE(remote_client_addr);
			TALLOC_FREE(remote_client_name);

			ret = tsocket_address_unix_from_path(
				info8,
				AS_SYSTEM_MAGIC_PATH_TOKEN,
				&taddr);
			if (ret == -1) {
				DBG_DEBUG("tsocket_address_unix_from_path "
					  "failed\n");
				goto nomem;
			}

			remote_client_addr =
				tsocket_address_unix_path(taddr, info8);
			if (remote_client_addr == NULL) {
				DBG_DEBUG("tsocket_address_unix_path "
					  "failed\n");
				goto nomem;
			}
			remote_client_name =
				talloc_strdup(info8, remote_client_addr);
			if (remote_client_name == NULL) {
				DBG_DEBUG("talloc_strdup failed\n");
				goto nomem;
			}
		}
	}

	info8->remote_client_addr = remote_client_addr;
	info8->remote_client_name = remote_client_name;
	info8->local_server_addr = local_server_addr;
	info8->local_server_name = local_server_name;

	*pinfo8 = info8;
	return NT_STATUS_OK;

nomem:
	status = NT_STATUS_NO_MEMORY;
fail:
	/* Frees all the talloc children hung off info8 as well */
	TALLOC_FREE(info8);
	return status;
}
902
/*
 * State for rpc_host_bind_read_send/recv: read the initial DCERPC
 * bind packet from a freshly accepted client connection.
 */
struct rpc_host_bind_read_state {
	struct tevent_context *ev;

	/* The client socket; handed back to the caller via recv() */
	int sock;
	/* tstream wrapper around a dup() of "sock" for reading */
	struct tstream_context *plain;
	/* For NCACN_NP: the named-pipe-auth stream layered over "plain" */
	struct tstream_context *npa_stream;

	/* The parsed bind packet, returned from recv() */
	struct ncacn_packet *pkt;
	/* Client description sent down to the rpcd_* worker */
	struct rpc_host_client *client;
};
913
914 static void rpc_host_bind_read_cleanup(
915         struct tevent_req *req, enum tevent_req_state req_state);
916 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
917 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
918
919 /*
920  * Wait for a bind packet from a client.
921  */
922 static struct tevent_req *rpc_host_bind_read_send(
923         TALLOC_CTX *mem_ctx,
924         struct tevent_context *ev,
925         enum dcerpc_transport_t transport,
926         int *psock,
927         const struct samba_sockaddr *peer_addr)
928 {
929         struct tevent_req *req = NULL, *subreq = NULL;
930         struct rpc_host_bind_read_state *state = NULL;
931         int rc, sock_dup;
932         NTSTATUS status;
933
934         req = tevent_req_create(
935                 mem_ctx, &state, struct rpc_host_bind_read_state);
936         if (req == NULL) {
937                 return NULL;
938         }
939         state->ev = ev;
940
941         state->sock = *psock;
942         *psock = -1;
943
944         tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);
945
946         state->client = talloc_zero(state, struct rpc_host_client);
947         if (tevent_req_nomem(state->client, req)) {
948                 return tevent_req_post(req, ev);
949         }
950
951         /*
952          * Dup the socket to read the first RPC packet:
953          * tstream_bsd_existing_socket() takes ownership with
954          * autoclose, but we need to send "sock" down to our worker
955          * process later.
956          */
957         sock_dup = dup(state->sock);
958         if (sock_dup == -1) {
959                 tevent_req_error(req, errno);
960                 return tevent_req_post(req, ev);
961         }
962
963         rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
964         if (rc == -1) {
965                 DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
966                           strerror(errno));
967                 tevent_req_error(req, errno);
968                 close(sock_dup);
969                 return tevent_req_post(req, ev);
970         }
971         /* as server we want to fail early */
972         tstream_bsd_fail_readv_first_error(state->plain, true);
973
974         if (transport == NCACN_NP) {
975                 subreq = tstream_npa_accept_existing_send(
976                         state,
977                         ev,
978                         state->plain,
979                         FILE_TYPE_MESSAGE_MODE_PIPE,
980                         0xff | 0x0400 | 0x0100,
981                         4096);
982                 if (tevent_req_nomem(subreq, req)) {
983                         return tevent_req_post(req, ev);
984                 }
985                 tevent_req_set_callback(
986                         subreq, rpc_host_bind_read_got_npa, req);
987                 return req;
988         }
989
990         status = rpc_host_generate_npa_info8_from_sock(
991                 state->client,
992                 transport,
993                 state->sock,
994                 peer_addr,
995                 &state->client->npa_info8);
996         if (!NT_STATUS_IS_OK(status)) {
997                 tevent_req_oom(req);
998                 return tevent_req_post(req, ev);
999         }
1000
1001         subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
1002         if (tevent_req_nomem(subreq, req)) {
1003                 return tevent_req_post(req, ev);
1004         }
1005         tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
1006         return req;
1007 }
1008
1009 static void rpc_host_bind_read_cleanup(
1010         struct tevent_req *req, enum tevent_req_state req_state)
1011 {
1012         struct rpc_host_bind_read_state *state = tevent_req_data(
1013                 req, struct rpc_host_bind_read_state);
1014
1015         if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
1016                 close(state->sock);
1017                 state->sock = -1;
1018         }
1019 }
1020
1021 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
1022 {
1023         struct tevent_req *req = tevent_req_callback_data(
1024                 subreq, struct tevent_req);
1025         struct rpc_host_bind_read_state *state = tevent_req_data(
1026                 req, struct rpc_host_bind_read_state);
1027         struct named_pipe_auth_req_info8 *info8 = NULL;
1028         int ret, err;
1029
1030         ret = tstream_npa_accept_existing_recv(subreq,
1031                                                &err,
1032                                                state,
1033                                                &state->npa_stream,
1034                                                &info8,
1035                                                NULL,  /* transport */
1036                                                NULL,  /* remote_client_addr */
1037                                                NULL,  /* remote_client_name */
1038                                                NULL,  /* local_server_addr */
1039                                                NULL,  /* local_server_name */
1040                                                NULL); /* session_info */
1041         if (ret == -1) {
1042                 tevent_req_error(req, err);
1043                 return;
1044         }
1045
1046         state->client->npa_info8 = talloc_move(state->client, &info8);
1047
1048         subreq = dcerpc_read_ncacn_packet_send(
1049                 state, state->ev, state->npa_stream);
1050         if (tevent_req_nomem(subreq, req)) {
1051                 return;
1052         }
1053         tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
1054 }
1055
1056 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
1057 {
1058         struct tevent_req *req = tevent_req_callback_data(
1059                 subreq, struct tevent_req);
1060         struct rpc_host_bind_read_state *state = tevent_req_data(
1061                 req, struct rpc_host_bind_read_state);
1062         struct ncacn_packet *pkt = NULL;
1063         NTSTATUS status;
1064
1065         status = dcerpc_read_ncacn_packet_recv(
1066                 subreq,
1067                 state->client,
1068                 &pkt,
1069                 &state->client->bind_packet);
1070         TALLOC_FREE(subreq);
1071         if (!NT_STATUS_IS_OK(status)) {
1072                 DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
1073                           nt_errstr(status));
1074                 tevent_req_error(req, EINVAL); /* TODO */
1075                 return;
1076         }
1077         state->pkt = talloc_move(state, &pkt);
1078
1079         tevent_req_done(req);
1080 }
1081
1082 static int rpc_host_bind_read_recv(
1083         struct tevent_req *req,
1084         TALLOC_CTX *mem_ctx,
1085         int *sock,
1086         struct rpc_host_client **client,
1087         struct ncacn_packet **bind_pkt)
1088 {
1089         struct rpc_host_bind_read_state *state = tevent_req_data(
1090                 req, struct rpc_host_bind_read_state);
1091         int err;
1092
1093         if (tevent_req_is_unix_error(req, &err)) {
1094                 tevent_req_received(req);
1095                 return err;
1096         }
1097
1098         *sock = state->sock;
1099         state->sock = -1;
1100
1101         *client = talloc_move(mem_ctx, &state->client);
1102         *bind_pkt = talloc_move(mem_ctx, &state->pkt);
1103         tevent_req_received(req);
1104         return 0;
1105 }
1106
1107 /*
1108  * Start the given rpcd_* binary.
1109  */
1110 static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
1111 {
1112         struct rpc_work_process *worker = &server->workers[idx];
1113         char **argv = NULL;
1114         int ret = ENOMEM;
1115
1116         argv = str_list_make_empty(server);
1117         str_list_add_printf(
1118                 &argv, "%s", server->rpc_server_exe);
1119         str_list_add_printf(
1120                 &argv, "--configfile=%s", get_dyn_CONFIGFILE());
1121         str_list_add_printf(
1122                 &argv, "--worker-group=%"PRIu32, server->server_index);
1123         str_list_add_printf(
1124                 &argv, "--worker-index=%zu", idx);
1125         str_list_add_printf(
1126                 &argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
1127         if (!is_default_dyn_LOGFILEBASE()) {
1128                 str_list_add_printf(
1129                         &argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
1130         }
1131         if (argv == NULL) {
1132                 ret = ENOMEM;
1133                 goto fail;
1134         }
1135
1136         worker->pid = fork();
1137         if (worker->pid == -1) {
1138                 ret = errno;
1139                 goto fail;
1140         }
1141         if (worker->pid == 0) {
1142                 /* Child. */
1143                 close(server->host->worker_stdin[1]);
1144                 ret = dup2(server->host->worker_stdin[0], 0);
1145                 if (ret != 0) {
1146                         exit(1);
1147                 }
1148                 execv(argv[0], argv);
1149                 _exit(1);
1150         }
1151
1152         DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
1153                   server->rpc_server_exe,
1154                   idx,
1155                   (int)worker->pid);
1156
1157         ret = 0;
1158 fail:
1159         TALLOC_FREE(argv);
1160         return ret;
1161 }
1162
1163 /*
1164  * Find an rpcd_* worker for an external client, respect server->max_workers
1165  */
1166 static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
1167 {
1168         struct rpc_work_process *worker = NULL;
1169         struct rpc_work_process *perfect_worker = NULL;
1170         struct rpc_work_process *best_worker = NULL;
1171         size_t empty_slot = SIZE_MAX;
1172         size_t i;
1173
1174         for (i=0; i<server->max_workers; i++) {
1175                 worker = &server->workers[i];
1176
1177                 if (worker->pid == -1) {
1178                         empty_slot = MIN(empty_slot, i);
1179                         continue;
1180                 }
1181                 if (!worker->available) {
1182                         continue;
1183                 }
1184                 if (worker->num_associations == 0) {
1185                         /*
1186                          * We have an idle worker...
1187                          */
1188                         perfect_worker = worker;
1189                         break;
1190                 }
1191                 if (best_worker == NULL) {
1192                         /*
1193                          * It's busy, but the best so far...
1194                          */
1195                         best_worker = worker;
1196                         continue;
1197                 }
1198                 if (worker->num_associations < best_worker->num_associations) {
1199                         /*
1200                          * It's also busy, but has less association groups
1201                          * (logical clients)
1202                          */
1203                         best_worker = worker;
1204                         continue;
1205                 }
1206                 if (worker->num_associations > best_worker->num_associations) {
1207                         /*
1208                          * It's not better
1209                          */
1210                         continue;
1211                 }
1212                 /*
1213                  * Ok, with the same number of association groups
1214                  * we pick the one with the lowest number of connections
1215                  */
1216                 if (worker->num_connections < best_worker->num_connections) {
1217                         best_worker = worker;
1218                         continue;
1219                 }
1220         }
1221
1222         if (perfect_worker != NULL) {
1223                 return perfect_worker;
1224         }
1225
1226         if (empty_slot < SIZE_MAX) {
1227                 int ret = rpc_host_exec_worker(server, empty_slot);
1228                 if (ret != 0) {
1229                         DBG_WARNING("Could not fork worker: %s\n",
1230                                     strerror(ret));
1231                 }
1232                 return NULL;
1233         }
1234
1235         if (best_worker != NULL) {
1236                 return best_worker;
1237         }
1238
1239         return NULL;
1240 }
1241
1242 /*
1243  * Find an rpcd_* worker for an internal connection, possibly go beyond
1244  * server->max_workers
1245  */
1246 static struct rpc_work_process *rpc_host_find_idle_worker(
1247         struct rpc_server *server)
1248 {
1249         struct rpc_work_process *worker = NULL, *tmp = NULL;
1250         size_t i, num_workers = talloc_array_length(server->workers);
1251         size_t empty_slot = SIZE_MAX;
1252         int ret;
1253
1254         for (i=server->max_workers; i<num_workers; i++) {
1255                 worker = &server->workers[i];
1256
1257                 if (worker->pid == -1) {
1258                         empty_slot = MIN(empty_slot, i);
1259                         continue;
1260                 }
1261                 if (!worker->available) {
1262                         continue;
1263                 }
1264                 if (worker->num_associations == 0) {
1265                         return &server->workers[i];
1266                 }
1267         }
1268
1269         if (empty_slot < SIZE_MAX) {
1270                 ret = rpc_host_exec_worker(server, empty_slot);
1271                 if (ret != 0) {
1272                         DBG_WARNING("Could not fork worker: %s\n",
1273                                     strerror(ret));
1274                 }
1275                 return NULL;
1276         }
1277
1278         /*
1279          * All workers are busy. We need to expand the number of
1280          * workers because we were asked for an idle worker.
1281          */
1282         if (num_workers >= UINT16_MAX) {
1283                 /*
1284                  * The worker index would not fit into 16-bits
1285                  */
1286                 return NULL;
1287         }
1288         tmp = talloc_realloc(
1289                 server,
1290                 server->workers,
1291                 struct rpc_work_process,
1292                 num_workers+1);
1293         if (tmp == NULL) {
1294                 return NULL;
1295         }
1296         server->workers = tmp;
1297
1298         server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };
1299
1300         ret = rpc_host_exec_worker(server, num_workers);
1301         if (ret != 0) {
1302                 DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
1303         }
1304
1305         return NULL;
1306 }
1307
1308 /*
1309  * Find an rpcd_* process to talk to. Start a new one if necessary.
1310  */
1311 static void rpc_host_distribute_clients(struct rpc_server *server)
1312 {
1313         struct rpc_work_process *worker = NULL;
1314         struct rpc_host_pending_client *pending_client = NULL;
1315         uint32_t assoc_group_id;
1316         DATA_BLOB blob;
1317         struct iovec iov;
1318         enum ndr_err_code ndr_err;
1319         NTSTATUS status;
1320         const char *client_type = NULL;
1321
1322 again:
1323         pending_client = server->pending_clients;
1324         if (pending_client == NULL) {
1325                 DBG_DEBUG("No pending clients\n");
1326                 return;
1327         }
1328
1329         assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;
1330
1331         if (assoc_group_id != 0) {
1332                 size_t num_workers = talloc_array_length(server->workers);
1333                 uint16_t worker_index = assoc_group_id >> 16;
1334
1335                 client_type = "associated";
1336
1337                 if (worker_index >= num_workers) {
1338                         DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
1339                                   assoc_group_id);
1340                         goto done;
1341                 }
1342                 worker = &server->workers[worker_index];
1343
1344                 if ((worker->pid == -1) || !worker->available) {
1345                         DBG_DEBUG("Requested worker index %"PRIu16": "
1346                                   "pid=%d, available=%d\n",
1347                                   worker_index,
1348                                   (int)worker->pid,
1349                                   (int)worker->available);
1350                         /*
1351                          * Pick a random one for a proper bind nack
1352                          */
1353                         client_type = "associated+lost";
1354                         worker = rpc_host_find_worker(server);
1355                 }
1356         } else {
1357                 struct auth_session_info_transport *session_info =
1358                         pending_client->client->npa_info8->session_info;
1359                 uint32_t flags = 0;
1360                 bool found;
1361
1362                 client_type = "new";
1363
1364                 found = security_token_find_npa_flags(
1365                         session_info->session_info->security_token,
1366                         &flags);
1367
1368                 /* fresh assoc group requested */
1369                 if (found & (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
1370                         client_type = "new+exclusive";
1371                         worker = rpc_host_find_idle_worker(server);
1372                 } else {
1373                         client_type = "new";
1374                         worker = rpc_host_find_worker(server);
1375                 }
1376         }
1377
1378         if (worker == NULL) {
1379                 DBG_DEBUG("No worker found for %s client\n", client_type);
1380                 return;
1381         }
1382
1383         DLIST_REMOVE(server->pending_clients, pending_client);
1384
1385         ndr_err = ndr_push_struct_blob(
1386                 &blob,
1387                 pending_client,
1388                 pending_client->client,
1389                 (ndr_push_flags_fn_t)ndr_push_rpc_host_client);
1390         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1391                 DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
1392                             ndr_errstr(ndr_err));
1393                 goto done;
1394         }
1395
1396         DBG_INFO("Sending %s client %s to %d with "
1397                  "%"PRIu32" associations and %"PRIu32" connections\n",
1398                  client_type,
1399                  server->rpc_server_exe,
1400                  worker->pid,
1401                  worker->num_associations,
1402                  worker->num_connections);
1403
1404         iov = (struct iovec) {
1405                 .iov_base = blob.data, .iov_len = blob.length,
1406         };
1407
1408         status = messaging_send_iov(
1409                 server->host->msg_ctx,
1410                 pid_to_procid(worker->pid),
1411                 MSG_RPC_HOST_NEW_CLIENT,
1412                 &iov,
1413                 1,
1414                 &pending_client->sock,
1415                 1);
1416         if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
1417                 DBG_DEBUG("worker %d died, sigchld not yet received?\n",
1418                           worker->pid);
1419                 DLIST_ADD(server->pending_clients, pending_client);
1420                 worker->available = false;
1421                 goto again;
1422         }
1423         if (!NT_STATUS_IS_OK(status)) {
1424                 DBG_DEBUG("messaging_send_iov failed: %s\n",
1425                           nt_errstr(status));
1426                 goto done;
1427         }
1428         if (assoc_group_id == 0) {
1429                 worker->num_associations += 1;
1430         }
1431         worker->num_connections += 1;
1432         TALLOC_FREE(worker->exit_timer);
1433
1434         TALLOC_FREE(server->host->np_helper_shutdown);
1435
1436 done:
1437         TALLOC_FREE(pending_client);
1438 }
1439
1440 static int rpc_host_pending_client_destructor(
1441         struct rpc_host_pending_client *p)
1442 {
1443         TALLOC_FREE(p->hangup_wait);
1444         if (p->sock != -1) {
1445                 close(p->sock);
1446                 p->sock = -1;
1447         }
1448         DLIST_REMOVE(p->server->pending_clients, p);
1449         return 0;
1450 }
1451
1452 /*
1453  * Exception condition handler before rpcd_* worker
1454  * is handling the socket. Either the client exited or
1455  * sent unexpected data after the initial bind.
1456  */
1457 static void rpc_host_client_exited(struct tevent_req *subreq)
1458 {
1459         struct rpc_host_pending_client *pending = tevent_req_callback_data(
1460                 subreq, struct rpc_host_pending_client);
1461         bool ok;
1462         int err;
1463
1464         ok = wait_for_read_recv(subreq, &err);
1465
1466         TALLOC_FREE(subreq);
1467         pending->hangup_wait = NULL;
1468
1469         if (ok) {
1470                 DBG_DEBUG("client on sock %d sent data\n", pending->sock);
1471         } else {
1472                 DBG_DEBUG("client exited with %s\n", strerror(err));
1473         }
1474         TALLOC_FREE(pending);
1475 }
1476
/*
 * One interface with the strv list of binding strings it is
 * reachable on; used to fill epmdb.tdb.
 */
struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;
	/* strv: interface name first, then one binding string per entry */
	char *bindings;
};
1481
/*
 * Merge one endpoint's interfaces into the per-interface binding
 * map: for every interface offered on "ep" (except mgmt), make sure
 * a map entry exists and that it lists this endpoint's binding
 * string.
 *
 * *pmaps is a talloc array owned by mem_ctx; it is updated in place
 * (and written back even on failure, so the caller can free it).
 */
static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	/* The DCE/RPC management interface (afa8bd80-7d8a-11c9-...) */
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		/*
		 * NOTE(review): "1.0" is a floating literal converted
		 * to the uint32 if_version (value 1) -- works, but
		 * plain "1" would be clearer.
		 */
		1.0};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into epmdb.tdb.
			 */
			continue;
		}

		/* Look for an existing map entry for this interface */
		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			/* Not found: append a new map entry */
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				/*
				 * Steal the name string from
				 * iface_names; it becomes the first
				 * strv element of .bindings.
				 */
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		/* Add the binding string unless already listed */
		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	/* Always publish the (possibly reallocated) array pointer */
	*pmaps = maps;
	return ok;
}
1571
1572 static bool rpc_iface_binding_map_add_endpoints(
1573         TALLOC_CTX *mem_ctx,
1574         struct rpc_host_endpoint **endpoints,
1575         struct rpc_host_iface_name *iface_names,
1576         struct rpc_iface_binding_map **pbinding_maps)
1577 {
1578         size_t i, num_endpoints = talloc_array_length(endpoints);
1579
1580         for (i=0; i<num_endpoints; i++) {
1581                 bool ok = rpc_iface_binding_map_add_endpoint(
1582                         mem_ctx, endpoints[i], iface_names, pbinding_maps);
1583                 if (!ok) {
1584                         return false;
1585                 }
1586         }
1587         return true;
1588 }
1589
1590 static bool rpc_host_fill_epm_db(
1591         struct tdb_wrap *db,
1592         struct rpc_host_endpoint **endpoints,
1593         struct rpc_host_iface_name *iface_names)
1594 {
1595         struct rpc_iface_binding_map *maps = NULL;
1596         size_t i, num_maps;
1597         bool ret = false;
1598         bool ok;
1599
1600         ok = rpc_iface_binding_map_add_endpoints(
1601                 talloc_tos(), endpoints, iface_names, &maps);
1602         if (!ok) {
1603                 goto fail;
1604         }
1605
1606         num_maps = talloc_array_length(maps);
1607
1608         for (i=0; i<num_maps; i++) {
1609                 struct rpc_iface_binding_map *map = &maps[i];
1610                 struct ndr_syntax_id_buf buf;
1611                 char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
1612                 TDB_DATA value = {
1613                         .dptr = (uint8_t *)map->bindings,
1614                         .dsize = talloc_array_length(map->bindings),
1615                 };
1616                 int rc;
1617
1618                 rc = tdb_store(
1619                         db->tdb, string_term_tdb_data(keystr), value, 0);
1620                 if (rc == -1) {
1621                         DBG_DEBUG("tdb_store() failed: %s\n",
1622                                   tdb_errorstr(db->tdb));
1623                         goto fail;
1624                 }
1625         }
1626
1627         ret = true;
1628 fail:
1629         TALLOC_FREE(maps);
1630         return ret;
1631 }
1632
/* State for rpc_server_setup_send/recv */
struct rpc_server_setup_state {
	/* The server object under construction, returned via recv() */
	struct rpc_server *server;
};
1636
1637 static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);
1638
/*
 * Async initialize state for all possible rpcd_* servers.
 * Note this does not start them.
 *
 * Queries "rpc_server_exe" for its endpoints, then (in the callback)
 * creates listening sockets for them.
 */
static struct tevent_req *rpc_server_setup_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host *host,
	const char *rpc_server_exe)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_setup_state *state = NULL;
	struct rpc_server *server = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_setup_state);
	if (req == NULL) {
		return NULL;
	}
	state->server = talloc_zero(state, struct rpc_server);
	if (tevent_req_nomem(state->server, req)) {
		return tevent_req_post(req, ev);
	}

	server = state->server;

	/* server_index is assigned later; mark as not-yet-set */
	*server = (struct rpc_server) {
		.host = host,
		.server_index = UINT32_MAX,
		.rpc_server_exe = talloc_strdup(server, rpc_server_exe),
	};
	if (tevent_req_nomem(server->rpc_server_exe, req)) {
		return tevent_req_post(req, ev);
	}

	/* As an np helper we only serve named pipes */
	subreq = rpc_server_get_endpoints_send(
		state,
		ev,
		rpc_server_exe,
		host->np_helper ? NCACN_NP : NCA_UNKNOWN);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
	return req;
}
1685
/*
 * The rpcd_* binary reported its endpoints: allocate the worker
 * slots, create and listen on the binding sockets, and publish the
 * endpoints to epmdb.tdb.
 */
static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	struct rpc_server *server = state->server;
	int ret;
	size_t i, num_endpoints;
	bool ok;

	ret = rpc_server_get_endpoints_recv(
		subreq,
		server,
		&server->endpoints,
		&server->iface_names,
		&server->max_workers,
		&server->idle_seconds);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(ret));
		return;
	}

	server->workers = talloc_array(
		server, struct rpc_work_process, server->max_workers);
	if (tevent_req_nomem(server->workers, req)) {
		return;
	}

	for (i=0; i<server->max_workers; i++) {
		/* mark as not yet created */
		server->workers[i] = (struct rpc_work_process) { .pid=-1, };
	}

	num_endpoints = talloc_array_length(server->endpoints);

	for (i=0; i<num_endpoints; i++) {
		struct rpc_host_endpoint *e = server->endpoints[i];
		NTSTATUS status;
		size_t j;

		e->server = server;

		status = dcesrv_create_binding_sockets(
			e->binding, e, &e->num_fds, &e->fds);
		if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
			/* transport not available here, skip endpoint */
			continue;
		}
		/* tevent_req_nterror() returns true only on error */
		if (tevent_req_nterror(req, status)) {
			DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
				  nt_errstr(status));
			return;
		}

		for (j=0; j<e->num_fds; j++) {
			ret = listen(e->fds[j], 256);
			if (ret == -1) {
				tevent_req_nterror(
					req, map_nt_error_from_unix(errno));
				return;
			}
		}
	}

	/* Best effort: a stale epmdb entry is not fatal */
	ok = rpc_host_fill_epm_db(
		server->host->epmdb, server->endpoints, server->iface_names);
	if (!ok) {
		DBG_DEBUG("rpc_host_fill_epm_db failed\n");
	}

	tevent_req_done(req);
}
1759
1760 static NTSTATUS rpc_server_setup_recv(
1761         struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
1762 {
1763         struct rpc_server_setup_state *state = tevent_req_data(
1764                 req, struct rpc_server_setup_state);
1765         NTSTATUS status;
1766
1767         if (tevent_req_is_nterror(req, &status)) {
1768                 tevent_req_received(req);
1769                 return status;
1770         }
1771
1772         *server = talloc_move(mem_ctx, &state->server);
1773         tevent_req_received(req);
1774         return NT_STATUS_OK;
1775 }
1776
1777 /*
1778  * rpcd_* died. Called from SIGCHLD handler.
1779  */
1780 static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
1781 {
1782         size_t i, num_servers = talloc_array_length(host->servers);
1783         struct rpc_work_process *worker = NULL;
1784         bool found_pid = false;
1785         bool have_active_worker = false;
1786
1787         for (i=0; i<num_servers; i++) {
1788                 struct rpc_server *server = host->servers[i];
1789                 size_t j, num_workers;
1790
1791                 if (server == NULL) {
1792                         /* SIGCHLD for --list-interfaces run */
1793                         continue;
1794                 }
1795
1796                 num_workers = talloc_array_length(server->workers);
1797
1798                 for (j=0; j<num_workers; j++) {
1799                         worker = &server->workers[j];
1800                         if (worker->pid == pid) {
1801                                 found_pid = true;
1802                                 worker->pid = -1;
1803                                 worker->available = false;
1804                         }
1805
1806                         if (worker->pid != -1) {
1807                                 have_active_worker = true;
1808                         }
1809                 }
1810         }
1811
1812         if (!found_pid) {
1813                 DBG_WARNING("No worker with PID %d\n", (int)pid);
1814                 return;
1815         }
1816
1817         if (!have_active_worker && host->np_helper) {
1818                 /*
1819                  * We have nothing left to do as an np_helper.
1820                  * Terminate ourselves (samba-dcerpcd). We will
1821                  * be restarted on demand anyway.
1822                  */
1823                 DBG_DEBUG("Exiting idle np helper\n");
1824                 exit(0);
1825         }
1826 }
1827
1828 /*
1829  * rpcd_* died.
1830  */
1831 static void rpc_host_sigchld(
1832         struct tevent_context *ev,
1833         struct tevent_signal *se,
1834         int signum,
1835         int count,
1836         void *siginfo,
1837         void *private_data)
1838 {
1839         struct rpc_host *state = talloc_get_type_abort(
1840                 private_data, struct rpc_host);
1841         pid_t pid;
1842         int wstatus;
1843
1844         while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
1845                 DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
1846                 rpc_worker_exited(state, pid);
1847         }
1848 }
1849
/*
 * Idle timer fired for a rpcd_* worker. Ask it to terminate by
 * sending it a MSG_SHUTDOWN message; the SIGCHLD handler will later
 * clear its slot once it actually exits.
 */
static void rpc_host_exit_worker(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct rpc_server *server = talloc_get_type_abort(
		private_data, struct rpc_server);
	size_t i, num_workers = talloc_array_length(server->workers);

	/*
	 * Scan for the right worker. We don't have too many of those,
	 * and maintaining an index would be more data structure effort.
	 */

	for (i=0; i<num_workers; i++) {
		struct rpc_work_process *w = &server->workers[i];
		NTSTATUS status;

		/* Identify the worker by its own timer pointer */
		if (w->exit_timer != te) {
			continue;
		}
		w->exit_timer = NULL;

		/* Only workers without association groups are timed out */
		SMB_ASSERT(w->num_associations == 0);

		status = messaging_send(
			server->host->msg_ctx,
			pid_to_procid(w->pid),
			MSG_SHUTDOWN,
			NULL);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
				  nt_errstr(status));
		}

		/* Don't hand new clients to a worker we asked to exit */
		w->available = false;
		break;
	}
}
1893
/*
 * A rpcd_* worker replied with its status (MSG_RPC_WORKER_STATUS):
 * which server/worker slot it occupies and how many association
 * groups and connections it currently serves. Validate the message,
 * update the slot, arm or disarm the idle-exit timer, and try to
 * hand out any pending clients.
 */
static void rpc_host_child_status_recv(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct rpc_host *host = talloc_get_type_abort(
		private_data, struct rpc_host);
	size_t num_servers = talloc_array_length(host->servers);
	struct rpc_server *server = NULL;
	size_t num_workers;
	pid_t src_pid = procid_to_pid(&server_id);
	struct rpc_work_process *worker = NULL;
	struct rpc_worker_status status_message;
	enum ndr_err_code ndr_err;

	ndr_err = ndr_pull_struct_blob_all_noalloc(
		data,
		&status_message,
		(ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		struct server_id_buf buf;
		DBG_WARNING("Got invalid message from pid %s\n",
			    server_id_str_buf(server_id, &buf));
		return;
	}
	if (DEBUGLEVEL >= 10) {
		NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
	}

	/*
	 * The indices come from a child process; range-check them
	 * before using them to address our arrays.
	 */
	if (status_message.server_index >= num_servers) {
		DBG_WARNING("Got invalid server_index=%"PRIu32", "
			    "num_servers=%zu\n",
			    status_message.server_index,
			    num_servers);
		return;
	}

	server = host->servers[status_message.server_index];

	num_workers = talloc_array_length(server->workers);
	if (status_message.worker_index >= num_workers) {
		DBG_WARNING("Got invalid worker_index=%"PRIu32", "
			    "num_workers=%zu\n",
			    status_message.worker_index,
			    num_workers);
		return;
	}
	worker = &server->workers[status_message.worker_index];

	/* Cross-check the sender's pid against the claimed slot */
	if (src_pid != worker->pid) {
		DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
			    status_message.worker_index,
			    (int)src_pid,
			    worker->pid);
		return;
	}

	worker->available = true;
	worker->num_associations = status_message.num_association_groups;
	worker->num_connections = status_message.num_connections;

	/*
	 * Busy workers must not be shut down; idle ones get an exit
	 * timer so rpc_host_exit_worker() can reap them.
	 */
	if (worker->num_associations != 0) {
		TALLOC_FREE(worker->exit_timer);
	} else {
		worker->exit_timer = tevent_add_timer(
			messaging_tevent_context(msg),
			server->workers,
			tevent_timeval_current_ofs(server->idle_seconds, 0),
			rpc_host_exit_worker,
			server);
		/* No NULL check, it's not fatal if this does not work */
	}

	/* The worker just reported in; maybe it can take waiting clients */
	rpc_host_distribute_clients(server);
}
1974
1975 /*
1976  * samba-dcerpcd has been asked to shutdown.
1977  * Mark the initial tevent_req as done so we
1978  * exit the event loop.
1979  */
1980 static void rpc_host_msg_shutdown(
1981         struct messaging_context *msg,
1982         void *private_data,
1983         uint32_t msg_type,
1984         struct server_id server_id,
1985         DATA_BLOB *data)
1986 {
1987         struct tevent_req *req = talloc_get_type_abort(
1988                 private_data, struct tevent_req);
1989         tevent_req_done(req);
1990 }
1991
/*
 * scandir() filter callback: keep only directory entries whose name
 * starts with the "rpcd_" prefix. scandir retains entries for which
 * the filter returns nonzero.
 */
static int rpcd_filter(const struct dirent *d)
{
	return (fnmatch("rpcd_*", d->d_name, 0) == 0) ? 1 : 0;
}
2000
2001 /*
2002  * Scan the given libexecdir for rpcd_* services
2003  * and return them as a strv list.
2004  */
2005 static int rpc_host_list_servers(
2006         const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
2007 {
2008         char *servers = NULL;
2009         struct dirent **namelist = NULL;
2010         int i, num_servers;
2011         int ret = ENOMEM;
2012
2013         num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
2014         if (num_servers == -1) {
2015                 DBG_DEBUG("scandir failed: %s\n", strerror(errno));
2016                 return errno;
2017         }
2018
2019         for (i=0; i<num_servers; i++) {
2020                 char *exe = talloc_asprintf(
2021                         mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
2022                 if (exe == NULL) {
2023                         goto fail;
2024                 }
2025
2026                 ret = strv_add(mem_ctx, &servers, exe);
2027                 TALLOC_FREE(exe);
2028                 if (ret != 0) {
2029                         goto fail;
2030                 }
2031         }
2032 fail:
2033         for (i=0; i<num_servers; i++) {
2034                 SAFE_FREE(namelist[i]);
2035         }
2036         SAFE_FREE(namelist);
2037
2038         if (ret != 0) {
2039                 TALLOC_FREE(servers);
2040                 return ret;
2041         }
2042         *pservers = servers;
2043         return 0;
2044 }
2045
/* Per-endpoint state while waiting for client connections */
struct rpc_host_endpoint_accept_state {
	struct tevent_context *ev;	/* event context driving the accepts */
	struct rpc_host_endpoint *endpoint; /* endpoint we listen on */
};
2050
2051 static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
2052 static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
2053
/*
 * Asynchronously wait for a DCERPC connection from a client.
 *
 * Starts one accept_send() per listening fd of the endpoint; each
 * incoming connection is handled in rpc_host_endpoint_accept_accepted,
 * which re-arms the accept. This request only ever completes with an
 * error; while all is well it keeps accepting forever.
 */
static struct tevent_req *rpc_host_endpoint_accept_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host_endpoint *endpoint)
{
	struct tevent_req *req = NULL;
	struct rpc_host_endpoint_accept_state *state = NULL;
	size_t i;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_endpoint_accept_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->endpoint = endpoint;

	for (i=0; i<endpoint->num_fds; i++) {
		struct tevent_req *subreq = NULL;

		subreq = accept_send(state, ev, endpoint->fds[i]);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_endpoint_accept_accepted, req);
	}

	return req;
}
2087
/*
 * Accept a DCERPC connection from a client.
 *
 * Re-arms the accept on the listening socket and kicks off reading
 * the client's first packet (the bind) on the newly accepted socket.
 */
static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	int sock, listen_sock, err;
	struct samba_sockaddr peer_addr;

	sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
	TALLOC_FREE(subreq);
	if (sock == -1) {
		/* What to do here? Just ignore the error and retry? */
		DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
		tevent_req_error(req, err);
		return;
	}

	/* Immediately wait for the next connection on the same fd */
	subreq = accept_send(state, state->ev, listen_sock);
	if (tevent_req_nomem(subreq, req)) {
		close(sock);
		sock = -1;
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_accepted, req);

	/*
	 * Hand "sock" by pointer to the bind reader; presumably it
	 * takes ownership of the fd -- confirm in
	 * rpc_host_bind_read_send (NOTE(review): on nomem here the
	 * fd's fate depends on that ownership contract).
	 */
	subreq = rpc_host_bind_read_send(
		state,
		state->ev,
		dcerpc_binding_get_transport(endpoint->binding),
		&sock,
		&peer_addr);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_got_bind, req);
}
2131
/*
 * Client sent us a DCERPC bind packet.
 *
 * Wrap the socket, the parsed bind and the client blob into a
 * rpc_host_pending_client, watch the socket for client hangup while
 * it waits, queue it and try to hand it to a worker.
 */
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	struct rpc_server *server = endpoint->server;
	struct rpc_host_pending_client *pending = NULL;
	struct rpc_host_client *client = NULL;
	struct ncacn_packet *bind_pkt = NULL;
	int ret;
	int sock=-1;

	ret = rpc_host_bind_read_recv(
		subreq, state, &sock, &client, &bind_pkt);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
			  strerror(ret));
		goto fail;
	}

	client->binding = dcerpc_binding_string(client, endpoint->binding);
	if (client->binding == NULL) {
		DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
		goto fail;
	}

	pending = talloc_zero(server, struct rpc_host_pending_client);
	if (pending == NULL) {
		DBG_WARNING("talloc failed, dropping client\n");
		goto fail;
	}
	pending->server = server;
	pending->sock = sock;
	pending->bind_pkt = talloc_move(pending, &bind_pkt);
	pending->client = talloc_move(pending, &client);
	talloc_set_destructor(pending, rpc_host_pending_client_destructor);
	/* "pending" owns the fd now; its destructor closes it */
	sock = -1;

	/* Notice if the client gives up before a worker takes over */
	pending->hangup_wait = wait_for_read_send(
		pending, state->ev, pending->sock, true);
	if (pending->hangup_wait == NULL) {
		DBG_WARNING("wait_for_read_send failed, dropping client\n");
		TALLOC_FREE(pending);
		return;
	}
	tevent_req_set_callback(
		pending->hangup_wait, rpc_host_client_exited, pending);

	DLIST_ADD_END(server->pending_clients, pending);
	rpc_host_distribute_clients(server);
	return;

fail:
	TALLOC_FREE(client);
	if (sock != -1) {
		close(sock);
	}
}
2196
2197 static int rpc_host_endpoint_accept_recv(
2198         struct tevent_req *req, struct rpc_host_endpoint **ep)
2199 {
2200         struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2201                 req, struct rpc_host_endpoint_accept_state);
2202
2203         *ep = state->endpoint;
2204
2205         return tevent_req_simple_recv_unix(req);
2206 }
2207
/*
 * Full state for samba-dcerpcd. Everything else
 * is hung off this.
 */
struct rpc_host_state {
	struct tevent_context *ev;
	struct rpc_host *host;

	/* All servers set up and listening? */
	bool is_ready;
	/* progname passed to daemon_ready(), NULL if not daemonized */
	const char *daemon_ready_progname;
	/* used to defer readiness notification to the event loop */
	struct tevent_immediate *ready_signal_immediate;
	/* fds to write one byte to once we are ready, then close */
	int *ready_signal_fds;

	/* rpcd_* binaries found, and how many finished setup so far */
	size_t num_servers;
	size_t num_prepared;
};
2224
/*
 * Tell whoever invoked samba-dcerpcd we're ready to
 * serve: write one byte to each registered readiness fd and close
 * it. Runs as a deferred immediate so it happens from the event
 * loop, and only once we really are ready.
 */
static void rpc_host_report_readiness(
	struct tevent_context *ev,
	struct tevent_immediate *im,
	void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t i, num_fds = talloc_array_length(state->ready_signal_fds);

	if (!state->is_ready) {
		/* Keep the fds; we'll be rescheduled when ready */
		DBG_DEBUG("Not yet ready\n");
		return;
	}

	for (i=0; i<num_fds; i++) {
		uint8_t byte = 0;
		ssize_t nwritten;

		/* Retry the 1-byte write if interrupted by a signal */
		do {
			nwritten = write(
				state->ready_signal_fds[i],
				(void *)&byte,
				sizeof(byte));
		} while ((nwritten == -1) && (errno == EINTR));

		close(state->ready_signal_fds[i]);
	}

	/* All waiters notified; drop the fd array */
	TALLOC_FREE(state->ready_signal_fds);
}
2259
/*
 * Respond to a "are you ready" message: take ownership of the fd the
 * sender passed along and schedule rpc_host_report_readiness(), which
 * answers (possibly later) by writing a byte to that fd. Always
 * returns false so the messaging_filtered_read request stays armed.
 */
static bool rpc_host_ready_signal_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t num_fds = talloc_array_length(state->ready_signal_fds);
	int *tmp = NULL;

	if (rec->msg_type != MSG_DAEMON_READY_FD) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	/* Paranoia: guard against size_t overflow in the realloc below */
	if (num_fds + 1 < num_fds) {
		return false;
	}
	tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
	if (tmp == NULL) {
		return false;
	}
	state->ready_signal_fds = tmp;

	/* Steal the fd from the record so messaging won't close it */
	state->ready_signal_fds[num_fds] = rec->fds[0];
	rec->fds[0] = -1;

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);

	return false;
}
2299
/*
 * Respond to a "what is your status" message: print per-server and
 * per-worker statistics to the fd the sender passed along. Always
 * returns false so the messaging_filtered_read request stays armed.
 */
static bool rpc_host_dump_status_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	struct rpc_host *host = state->host;
	struct rpc_server **servers = host->servers;
	size_t i, num_servers = talloc_array_length(servers);
	FILE *f = NULL;
	int fd;

	if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	/*
	 * dup() the received fd: messaging owns rec->fds[0] and will
	 * close it; fclose() below closes our duplicate.
	 */
	fd = dup(rec->fds[0]);
	if (fd == -1) {
		DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
			  rec->fds[0],
			  strerror(errno));
		return false;
	}

	f = fdopen(fd, "w");
	if (f == NULL) {
		DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
		close(fd);
		return false;
	}

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = servers[i];
		size_t j, num_workers = talloc_array_length(server->workers);
		size_t active_workers = 0;

		/* pid == -1 marks an empty worker slot */
		for (j=0; j<num_workers; j++) {
			if (server->workers[j].pid != -1) {
				active_workers += 1;
			}
		}

		fprintf(f,
			"%s: active_workers=%zu\n",
			server->rpc_server_exe,
			active_workers);

		for (j=0; j<num_workers; j++) {
			struct rpc_work_process *w = &server->workers[j];

			if (w->pid == (pid_t)-1) {
				continue;
			}

			fprintf(f,
				" worker[%zu]: pid=%d, num_associations=%"PRIu32", num_connections=%"PRIu32"\n",
				j,
				(int)w->pid,
				w->num_associations,
				w->num_connections);
		}
	}

	fclose(f);

	return false;
}
2373
2374 static void rpc_host_server_setup_done(struct tevent_req *subreq);
2375 static void rpc_host_endpoint_failed(struct tevent_req *subreq);
2376
2377 /*
2378  * Async startup for samba-dcerpcd.
2379  */
2380 static struct tevent_req *rpc_host_send(
2381         TALLOC_CTX *mem_ctx,
2382         struct tevent_context *ev,
2383         struct messaging_context *msg_ctx,
2384         char *servers,
2385         int ready_signal_fd,
2386         const char *daemon_ready_progname,
2387         bool is_np_helper)
2388 {
2389         struct tevent_req *req = NULL, *subreq = NULL;
2390         struct rpc_host_state *state = NULL;
2391         struct rpc_host *host = NULL;
2392         struct tevent_signal *se = NULL;
2393         char *epmdb_path = NULL;
2394         char *exe = NULL;
2395         size_t i, num_servers = strv_count(servers);
2396         NTSTATUS status;
2397         int ret;
2398
2399         req = tevent_req_create(req, &state, struct rpc_host_state);
2400         if (req == NULL) {
2401                 return NULL;
2402         }
2403         state->ev = ev;
2404         state->daemon_ready_progname = daemon_ready_progname;
2405
2406         state->ready_signal_immediate = tevent_create_immediate(state);
2407         if (tevent_req_nomem(state->ready_signal_immediate, req)) {
2408                 return tevent_req_post(req, ev);
2409         }
2410
2411         if (ready_signal_fd != -1) {
2412                 state->ready_signal_fds = talloc_array(state, int, 1);
2413                 if (tevent_req_nomem(state->ready_signal_fds, req)) {
2414                         return tevent_req_post(req, ev);
2415                 }
2416                 state->ready_signal_fds[0] = ready_signal_fd;
2417         }
2418
2419         state->host = talloc_zero(state, struct rpc_host);
2420         if (tevent_req_nomem(state->host, req)) {
2421                 return tevent_req_post(req, ev);
2422         }
2423         host = state->host;
2424
2425         host->msg_ctx = msg_ctx;
2426         host->np_helper = is_np_helper;
2427
2428         ret = pipe(host->worker_stdin);
2429         if (ret == -1) {
2430                 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2431                 return tevent_req_post(req, ev);
2432         }
2433
2434         host->servers = talloc_zero_array(
2435                 host, struct rpc_server *, num_servers);
2436         if (tevent_req_nomem(host->servers, req)) {
2437                 return tevent_req_post(req, ev);
2438         }
2439
2440         se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
2441         if (tevent_req_nomem(se, req)) {
2442                 return tevent_req_post(req, ev);
2443         }
2444         BlockSignals(false, SIGCHLD);
2445
2446         status = messaging_register(
2447                 msg_ctx,
2448                 host,
2449                 MSG_RPC_WORKER_STATUS,
2450                 rpc_host_child_status_recv);
2451         if (tevent_req_nterror(req, status)) {
2452                 return tevent_req_post(req, ev);
2453         }
2454
2455         status = messaging_register(
2456                 msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
2457         if (tevent_req_nterror(req, status)) {
2458                 return tevent_req_post(req, ev);
2459         }
2460
2461         subreq = messaging_filtered_read_send(
2462                 state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
2463         if (tevent_req_nomem(subreq, req)) {
2464                 return tevent_req_post(req, ev);
2465         }
2466
2467         subreq = messaging_filtered_read_send(
2468                 state, ev, msg_ctx, rpc_host_dump_status_filter, state);
2469         if (tevent_req_nomem(subreq, req)) {
2470                 return tevent_req_post(req, ev);
2471         }
2472
2473         epmdb_path = lock_path(state, "epmdb.tdb");
2474         if (tevent_req_nomem(epmdb_path, req)) {
2475                 return tevent_req_post(req, ev);
2476         }
2477
2478         host->epmdb = tdb_wrap_open(
2479                 host,
2480                 epmdb_path,
2481                 0,
2482                 TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
2483                 O_RDWR|O_CREAT,
2484                 0644);
2485         if (host->epmdb == NULL) {
2486                 DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
2487                           epmdb_path,
2488                           strerror(errno));
2489                 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2490                 return tevent_req_post(req, ev);
2491         }
2492         TALLOC_FREE(epmdb_path);
2493
2494         for (exe = strv_next(servers, exe), i = 0;
2495              exe != NULL;
2496              exe = strv_next(servers, exe), i++) {
2497
2498                 DBG_DEBUG("server_setup for %s index %zu\n", exe, i);
2499
2500                 subreq = rpc_server_setup_send(
2501                         state,
2502                         ev,
2503                         host,
2504                         exe);
2505                 if (tevent_req_nomem(subreq, req)) {
2506                         return tevent_req_post(req, ev);
2507                 }
2508                 tevent_req_set_callback(
2509                         subreq, rpc_host_server_setup_done, req);
2510         }
2511
2512         return req;
2513 }
2514
2515 /*
2516  * Timer function called after we were initialized but no one
2517  * connected. Shutdown.
2518  */
2519 static void rpc_host_shutdown(
2520         struct tevent_context *ev,
2521         struct tevent_timer *te,
2522         struct timeval current_time,
2523         void *private_data)
2524 {
2525         struct tevent_req *req = talloc_get_type_abort(
2526                 private_data, struct tevent_req);
2527         DBG_DEBUG("Nobody connected -- shutting down\n");
2528         tevent_req_done(req);
2529 }
2530
/*
 * One rpc_server_setup_send() finished. Record the server; once all
 * of them are done, start accepting on every endpoint, report
 * readiness and (in np_helper mode) arm the idle-shutdown timer.
 */
static void rpc_host_server_setup_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_server *server = NULL;
	struct rpc_host *host = state->host;
	size_t i, num_servers = talloc_array_length(host->servers);
	NTSTATUS status;

	status = rpc_server_setup_recv(subreq, host, &server);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
			  nt_errstr(status));
		/*
		 * A failed rpcd is dropped, not fatal: shrink the
		 * array so the all-prepared check below still fires.
		 */
		host->servers = talloc_realloc(
			host,
			host->servers,
			struct rpc_server *,
			num_servers-1);
		return;
	}

	server->server_index = state->num_prepared;
	host->servers[state->num_prepared] = server;

	state->num_prepared += 1;

	/* Wait until every rpcd has reported its endpoints */
	if (state->num_prepared < num_servers) {
		return;
	}

	/* All servers set up: start the accept loops */
	for (i=0; i<num_servers; i++) {
		size_t j, num_endpoints;

		server = host->servers[i];
		num_endpoints = talloc_array_length(server->endpoints);

		for (j=0; j<num_endpoints; j++) {
			subreq = rpc_host_endpoint_accept_send(
				state, state->ev, server->endpoints[j]);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(
				subreq, rpc_host_endpoint_failed, req);
		}
	}

	state->is_ready = true;

	if (state->daemon_ready_progname != NULL) {
		daemon_ready(state->daemon_ready_progname);
	}

	if (host->np_helper) {
		/*
		 * If we're started as an np helper, and no one talks to
		 * us within 10 seconds, just shut ourselves down.
		 */
		host->np_helper_shutdown = tevent_add_timer(
			state->ev,
			state,
			timeval_current_ofs(10, 0),
			rpc_host_shutdown,
			req);
		if (tevent_req_nomem(host->np_helper_shutdown, req)) {
			return;
		}
	}

	/* Answer any "are you ready" waiters that arrived early */
	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);
}
2609
2610 /*
2611  * Log accept fail on an endpoint.
2612  */
2613 static void rpc_host_endpoint_failed(struct tevent_req *subreq)
2614 {
2615         struct tevent_req *req = tevent_req_callback_data(
2616                 subreq, struct tevent_req);
2617         struct rpc_host_state *state = tevent_req_data(
2618                 req, struct rpc_host_state);
2619         struct rpc_host_endpoint *endpoint = NULL;
2620         char *binding_string = NULL;
2621         int ret;
2622
2623         ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
2624         TALLOC_FREE(subreq);
2625
2626         binding_string = dcerpc_binding_string(state, endpoint->binding);
2627         DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
2628                   binding_string,
2629                   strerror(ret));
2630         TALLOC_FREE(binding_string);
2631 }
2632
2633 static NTSTATUS rpc_host_recv(struct tevent_req *req)
2634 {
2635         return tevent_req_simple_recv_ntstatus(req);
2636 }
2637
/*
 * Create the samba-dcerpcd pidfile, or detect an already-running
 * instance.
 *
 * Returns 0 on success (we now own the pidfile), EAGAIN if another
 * instance already holds it (after handing it our ready_signal_fd so
 * that instance can report readiness on our behalf), or another errno
 * value on failure.
 */
static int rpc_host_pidfile_create(
	struct messaging_context *msg_ctx,
	const char *progname,
	int ready_signal_fd)
{
	const char *piddir = lp_pid_directory();
	/* piddir + '/' + progname + ".pid" + NUL = +6 */
	size_t len = strlen(piddir) + strlen(progname) + 6;
	char pidFile[len];
	pid_t existing_pid;
	int fd, ret;

	snprintf(pidFile,
		 sizeof(pidFile),
		 "%s/%s.pid",
		 piddir, progname);

	ret = pidfile_path_create(pidFile, &fd, &existing_pid);
	if (ret == 0) {
		/*
		 * leak fd: keeping it open for the lifetime of the
		 * process is what holds the pidfile lock.
		 */
		return 0;
	}

	if (ret != EAGAIN) {
		DBG_DEBUG("pidfile_path_create() failed: %s\n",
			  strerror(ret));
		return ret;
	}

	/* EAGAIN: another samba-dcerpcd already owns the pidfile */
	DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);

	if (ready_signal_fd != -1) {
		/*
		 * Pass our ready-signal fd to the existing instance
		 * so our caller still gets the "daemon is ready"
		 * notification.
		 */
		NTSTATUS status = messaging_send_iov(
			msg_ctx,
			pid_to_procid(existing_pid),
			MSG_DAEMON_READY_FD,
			NULL,
			0,
			&ready_signal_fd,
			1);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send ready_signal_fd: %s\n",
				  nt_errstr(status));
		}
	}

	return EAGAIN;
}
2685
2686 static void samba_dcerpcd_stdin_handler(
2687         struct tevent_context *ev,
2688         struct tevent_fd *fde,
2689         uint16_t flags,
2690         void *private_data)
2691 {
2692         struct tevent_req *req = talloc_get_type_abort(
2693                 private_data, struct tevent_req);
2694         char c;
2695
2696         if (read(0, &c, 1) != 1) {
2697                 /* we have reached EOF on stdin, which means the
2698                    parent has exited. Shutdown the server */
2699                 tevent_req_done(req);
2700         }
2701 }
2702
2703 /*
2704  * samba-dcerpcd microservice startup !
2705  */
2706 int main(int argc, const char *argv[])
2707 {
2708         const struct loadparm_substitution *lp_sub =
2709                 loadparm_s3_global_substitution();
2710         const char *progname = getprogname();
2711         TALLOC_CTX *frame = NULL;
2712         struct tevent_context *ev_ctx = NULL;
2713         struct messaging_context *msg_ctx = NULL;
2714         struct tevent_req *req = NULL;
2715         char *servers = NULL;
2716         const char *arg = NULL;
2717         size_t num_servers;
2718         poptContext pc;
2719         int ret, err;
2720         NTSTATUS status;
2721         bool log_stdout;
2722         bool ok;
2723
2724         int libexec_rpcds = 0;
2725         int np_helper = 0;
2726         int ready_signal_fd = -1;
2727
2728         struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
2729         struct poptOption long_options[] = {
2730                 POPT_AUTOHELP
2731                 {
2732                         .longName   = "libexec-rpcds",
2733                         .argInfo    = POPT_ARG_NONE,
2734                         .arg        = &libexec_rpcds,
2735                         .descrip    = "Use all rpcds in libexec",
2736                 },
2737                 {
2738                         .longName   = "ready-signal-fd",
2739                         .argInfo    = POPT_ARG_INT,
2740                         .arg        = &ready_signal_fd,
2741                         .descrip    = "fd to close when initialized",
2742                 },
2743                 {
2744                         .longName   = "np-helper",
2745                         .argInfo    = POPT_ARG_NONE,
2746                         .arg        = &np_helper,
2747                         .descrip    = "Internal named pipe server",
2748                 },
2749                 POPT_COMMON_SAMBA
2750                 POPT_COMMON_DAEMON
2751                 POPT_COMMON_VERSION
2752                 POPT_TABLEEND
2753         };
2754
2755         {
2756                 const char *fd_params[] = { "ready-signal-fd", };
2757
2758                 closefrom_except_fd_params(
2759                         3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
2760         }
2761
2762         talloc_enable_null_tracking();
2763         frame = talloc_stackframe();
2764         umask(0);
2765         sec_init();
2766         smb_init_locale();
2767
2768         ok = samba_cmdline_init(frame,
2769                                 SAMBA_CMDLINE_CONFIG_SERVER,
2770                                 true /* require_smbconf */);
2771         if (!ok) {
2772                 DBG_ERR("Failed to init cmdline parser!\n");
2773                 TALLOC_FREE(frame);
2774                 exit(ENOMEM);
2775         }
2776
2777         pc = samba_popt_get_context(getprogname(),
2778                                     argc,
2779                                     argv,
2780                                     long_options,
2781                                     0);
2782         if (pc == NULL) {
2783                 DBG_ERR("Failed to setup popt context!\n");
2784                 TALLOC_FREE(frame);
2785                 exit(1);
2786         }
2787
2788         poptSetOtherOptionHelp(
2789                 pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");
2790
2791         ret = poptGetNextOpt(pc);
2792
2793         if (ret != -1) {
2794                 if (ret >= 0) {
2795                         fprintf(stderr,
2796                                 "\nGot unexpected option %d\n",
2797                                 ret);
2798                 } else if (ret == POPT_ERROR_BADOPT) {
2799                         fprintf(stderr,
2800                                 "\nInvalid option %s: %s\n\n",
2801                                 poptBadOption(pc, 0),
2802                                 poptStrerror(ret));
2803                 } else {
2804                         fprintf(stderr,
2805                                 "\npoptGetNextOpt returned %s\n",
2806                                 poptStrerror(ret));
2807                 }
2808
2809                 poptFreeContext(pc);
2810                 TALLOC_FREE(frame);
2811                 exit(1);
2812         }
2813
2814         while ((arg = poptGetArg(pc)) != NULL) {
2815                 ret = strv_add(frame, &servers, arg);
2816                 if (ret != 0) {
2817                         DBG_ERR("strv_add() failed\n");
2818                         poptFreeContext(pc);
2819                         TALLOC_FREE(frame);
2820                         exit(1);
2821                 }
2822         }
2823
2824         log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
2825         if (log_stdout) {
2826                 setup_logging(progname, DEBUG_STDOUT);
2827         } else {
2828                 setup_logging(progname, DEBUG_FILE);
2829         }
2830
2831         /*
2832          * If "rpc start on demand helpers = true" in smb.conf we must
2833          * not start as standalone, only on demand from
2834          * local_np_connect() functions. Log an error message telling
2835          * the admin how to fix and then exit.
2836          */
2837         if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
2838                 DBG_ERR("Cannot start in standalone mode if smb.conf "
2839                         "[global] setting "
2840                         "\"rpc start on demand helpers = true\" - "
2841                         "exiting\n");
2842                         TALLOC_FREE(frame);
2843                         exit(1);
2844         }
2845
2846         if (libexec_rpcds != 0) {
2847                 ret = rpc_host_list_servers(
2848                         dyn_SAMBA_LIBEXECDIR, frame, &servers);
2849                 if (ret != 0) {
2850                         DBG_ERR("Could not list libexec: %s\n",
2851                                 strerror(ret));
2852                         poptFreeContext(pc);
2853                         TALLOC_FREE(frame);
2854                         exit(1);
2855                 }
2856         }
2857
2858         num_servers = strv_count(servers);
2859         if (num_servers == 0) {
2860                 poptPrintUsage(pc, stderr, 0);
2861                 poptFreeContext(pc);
2862                 TALLOC_FREE(frame);
2863                 exit(1);
2864         }
2865
2866         poptFreeContext(pc);
2867
2868         cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();
2869
2870         if (log_stdout && cmdline_daemon_cfg->fork) {
2871                 DBG_ERR("Can't log to stdout unless in foreground\n");
2872                 TALLOC_FREE(frame);
2873                 exit(1);
2874         }
2875
2876         msg_ctx = global_messaging_context();
2877         if (msg_ctx == NULL) {
2878                 DBG_ERR("messaging_init() failed\n");
2879                 TALLOC_FREE(frame);
2880                 exit(1);
2881         }
2882         ev_ctx = messaging_tevent_context(msg_ctx);
2883
2884         if (cmdline_daemon_cfg->fork) {
2885                 become_daemon(
2886                         true,
2887                         cmdline_daemon_cfg->no_process_group,
2888                         log_stdout);
2889
2890                 status = reinit_after_fork(msg_ctx, ev_ctx, false);
2891                 if (!NT_STATUS_IS_OK(status)) {
2892                         exit_daemon("reinit_after_fork() failed",
2893                                     map_errno_from_nt_status(status));
2894                 }
2895         } else {
2896                 DBG_DEBUG("Calling daemon_status\n");
2897                 daemon_status(progname, "Starting process ... ");
2898         }
2899
2900         BlockSignals(true, SIGPIPE);
2901
2902         dump_core_setup(progname, lp_logfile(frame, lp_sub));
2903
2904         reopen_logs();
2905
2906         DBG_STARTUP_NOTICE("%s version %s started.\n%s\n",
2907                            progname,
2908                            samba_version_string(),
2909                            samba_copyright_string());
2910
2911         (void)winbind_off();
2912         ok = init_guest_session_info(frame);
2913         (void)winbind_on();
2914         if (!ok) {
2915                 DBG_ERR("init_guest_session_info failed\n");
2916                 global_messaging_context_free();
2917                 TALLOC_FREE(frame);
2918                 exit(1);
2919         }
2920
2921         ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
2922         if (ret != 0) {
2923                 DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
2924                           strerror(ret));
2925                 global_messaging_context_free();
2926                 TALLOC_FREE(frame);
2927                 exit(1);
2928         }
2929
2930         req = rpc_host_send(
2931                 ev_ctx,
2932                 ev_ctx,
2933                 msg_ctx,
2934                 servers,
2935                 ready_signal_fd,
2936                 cmdline_daemon_cfg->fork ? NULL : progname,
2937                 np_helper != 0);
2938         if (req == NULL) {
2939                 DBG_ERR("rpc_host_send failed\n");
2940                 global_messaging_context_free();
2941                 TALLOC_FREE(frame);
2942                 exit(1);
2943         }
2944
2945         if (!cmdline_daemon_cfg->fork) {
2946                 struct stat st;
2947                 if (fstat(0, &st) != 0) {
2948                         DBG_DEBUG("fstat(0) failed: %s\n",
2949                                   strerror(errno));
2950                         global_messaging_context_free();
2951                         TALLOC_FREE(frame);
2952                         exit(1);
2953                 }
2954                 if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
2955                         tevent_add_fd(
2956                                 ev_ctx,
2957                                 ev_ctx,
2958                                 0,
2959                                 TEVENT_FD_READ,
2960                                 samba_dcerpcd_stdin_handler,
2961                                 req);
2962                 }
2963         }
2964
2965         ok = tevent_req_poll_unix(req, ev_ctx, &err);
2966         if (!ok) {
2967                 DBG_ERR("tevent_req_poll_unix failed: %s\n",
2968                         strerror(err));
2969                 global_messaging_context_free();
2970                 TALLOC_FREE(frame);
2971                 exit(1);
2972         }
2973
2974         status = rpc_host_recv(req);
2975         if (!NT_STATUS_IS_OK(status)) {
2976                 DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
2977                 global_messaging_context_free();
2978                 TALLOC_FREE(frame);
2979                 exit(1);
2980         }
2981
2982         TALLOC_FREE(frame);
2983
2984         return 0;
2985 }