Add prototypes required by samba-gtk.
[metze/samba/wip.git] / source3 / rpc_client / cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
/*
 * Static map from DCE/RPC interface (abstract syntax) ids to the
 * client-side \PIPE\xxx names used when opening the named pipe.
 * The table is terminated by a { NULL, NULL } sentinel entry.
 */
static const struct pipe_id_info {
        /* the names appear not to matter: the syntaxes _do_ matter */

        const char *client_pipe;       /* \PIPE\xxx name opened on the server */
        const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
} pipe_names [] =
{
        { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
        { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
        { PIPE_SAMR,            &ndr_table_samr.syntax_id },
        { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
        { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
        { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
        { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
        { PIPE_SPOOLSS,         &syntax_spoolss },
        { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
        { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
        { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
        { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
        { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
        { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
        { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
        { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
        { NULL, NULL }          /* end-of-table sentinel */
};
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85                                          const struct ndr_syntax_id *interface)
86 {
87         int i;
88         for (i = 0; pipe_names[i].client_pipe; i++) {
89                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
90                                         interface)) {
91                         return &pipe_names[i].client_pipe[5];
92                 }
93         }
94
95         /*
96          * Here we should ask \\epmapper, but for now our code is only
97          * interested in the known pipes mentioned in pipe_names[]
98          */
99
100         return NULL;
101 }
102
103 /********************************************************************
104  Map internal value to wire value.
105  ********************************************************************/
106
107 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
108 {
109         switch (auth_type) {
110
111         case PIPE_AUTH_TYPE_NONE:
112                 return RPC_ANONYMOUS_AUTH_TYPE;
113
114         case PIPE_AUTH_TYPE_NTLMSSP:
115                 return RPC_NTLMSSP_AUTH_TYPE;
116
117         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
118         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
119                 return RPC_SPNEGO_AUTH_TYPE;
120
121         case PIPE_AUTH_TYPE_SCHANNEL:
122                 return RPC_SCHANNEL_AUTH_TYPE;
123
124         case PIPE_AUTH_TYPE_KRB5:
125                 return RPC_KRB5_AUTH_TYPE;
126
127         default:
128                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
129                         "auth type %u\n",
130                         (unsigned int)auth_type ));
131                 break;
132         }
133         return -1;
134 }
135
136 /********************************************************************
137  Pipe description for a DEBUG
138  ********************************************************************/
139 static char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli)
140 {
141         char *result;
142
143         switch (cli->transport_type) {
144         case NCACN_NP:
145                 result = talloc_asprintf(mem_ctx, "host %s, pipe %s, "
146                                          "fnum 0x%x",
147                                          cli->desthost,
148                                          cli->trans.np.pipe_name,
149                                          (unsigned int)(cli->trans.np.fnum));
150                 break;
151         case NCACN_IP_TCP:
152         case NCACN_UNIX_STREAM:
153                 result = talloc_asprintf(mem_ctx, "host %s, fd %d",
154                                          cli->desthost, cli->trans.sock.fd);
155                 break;
156         default:
157                 result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
158                 break;
159         }
160         SMB_ASSERT(result != NULL);
161         return result;
162 }
163
164 /********************************************************************
165  Rpc pipe call id.
166  ********************************************************************/
167
168 static uint32 get_rpc_call_id(void)
169 {
170         static uint32 call_id = 0;
171         return ++call_id;
172 }
173
174 /*
175  * Realloc pdu to have a least "size" bytes
176  */
177
178 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
179 {
180         size_t extra_size;
181
182         if (prs_data_size(pdu) >= size) {
183                 return true;
184         }
185
186         extra_size = size - prs_data_size(pdu);
187
188         if (!prs_force_grow(pdu, extra_size)) {
189                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
190                           "%d bytes.\n", (int)extra_size));
191                 return false;
192         }
193
194         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
195                   (int)extra_size, prs_data_size(pdu)));
196         return true;
197 }
198
199
200 /*******************************************************************
201  Use SMBreadX to get rest of one fragment's worth of rpc data.
202  Reads the whole size or give an error message
203  ********************************************************************/
204
/* Per-request state for an async rpc_read_send() operation. */
struct rpc_read_state {
        struct event_context *ev;      /* event context driving the I/O */
        struct rpc_pipe_client *cli;   /* pipe being read from */
        char *data;                    /* caller-supplied destination buffer */
        size_t size;                   /* total number of bytes to read */
        size_t num_read;               /* bytes received so far */
};
212
213 static void rpc_read_np_done(struct async_req *subreq);
214 static void rpc_read_sock_done(struct async_req *subreq);
215
/*
 * Kick off an async read of exactly "size" bytes into "data", choosing
 * the read primitive by the client's transport.  Returns NULL on
 * immediate failure, otherwise an async request to be completed via
 * rpc_read_recv().
 */
static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
                                       struct event_context *ev,
                                       struct rpc_pipe_client *cli,
                                       char *data, size_t size)
{
        struct async_req *result, *subreq;
        struct rpc_read_state *state;

        if (!async_req_setup(mem_ctx, &result, &state,
                             struct rpc_read_state)) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->data = data;
        state->size = size;
        state->num_read = 0;

        DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));

        if (cli->transport_type == NCACN_NP) {
                /* Named pipe transport: read via SMBreadX on the fnum. */
                subreq = cli_read_andx_send(
                        state, ev, cli->trans.np.cli,
                        cli->trans.np.fnum, 0, size);
                if (subreq == NULL) {
                        DEBUG(10, ("cli_read_andx_send failed\n"));
                        goto fail;
                }
                subreq->async.fn = rpc_read_np_done;
                subreq->async.priv = result;
                return result;
        }

        if ((cli->transport_type == NCACN_IP_TCP)
            || (cli->transport_type == NCACN_UNIX_STREAM)) {
                /* Socket transport: read straight from the fd. */
                subreq = recvall_send(state, ev, cli->trans.sock.fd,
                                      data, size, 0);
                if (subreq == NULL) {
                        DEBUG(10, ("recvall_send failed\n"));
                        goto fail;
                }
                subreq->async.fn = rpc_read_sock_done;
                subreq->async.priv = result;
                return result;
        }

        /* Unknown transport type: post the error to the caller. */
        if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
                return result;
        }
 fail:
        TALLOC_FREE(result);
        return NULL;
}
269
/*
 * Completion callback for one SMBreadX chunk on a named pipe: copy the
 * received data into the caller's buffer and, if short, issue another
 * read for the remainder.
 */
static void rpc_read_np_done(struct async_req *subreq)
{
        struct async_req *req = talloc_get_type_abort(
                subreq->async.priv, struct async_req);
        struct rpc_read_state *state = talloc_get_type_abort(
                req->private_data, struct rpc_read_state);
        NTSTATUS status;
        ssize_t received;
        uint8_t *rcvbuf;

        status = cli_read_andx_recv(subreq, &received, &rcvbuf);
        /*
         * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
         * child of that.
         */
        if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
                /* Treated as success: the data that did arrive is valid. */
                status = NT_STATUS_OK;
        }
        if (!NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(subreq);
                async_req_error(req, status);
                return;
        }

        /* Copy out of rcvbuf before freeing its parent, subreq. */
        memcpy(state->data + state->num_read, rcvbuf, received);
        TALLOC_FREE(subreq);

        state->num_read += received;

        if (state->num_read == state->size) {
                /* All requested bytes have arrived. */
                async_req_done(req);
                return;
        }

        /* Short read: ask for the rest of the fragment. */
        subreq = cli_read_andx_send(
                state, state->ev, state->cli->trans.np.cli,
                state->cli->trans.np.fnum, 0,
                state->size - state->num_read);

        if (async_req_nomem(subreq, req)) {
                return;
        }

        subreq->async.fn = rpc_read_np_done;
        subreq->async.priv = req;
}
316
317 static void rpc_read_sock_done(struct async_req *subreq)
318 {
319         struct async_req *req = talloc_get_type_abort(
320                 subreq->async.priv, struct async_req);
321         NTSTATUS status;
322
323         status = recvall_recv(subreq);
324         TALLOC_FREE(subreq);
325         if (!NT_STATUS_IS_OK(status)) {
326                 async_req_error(req, status);
327                 return;
328         }
329
330         async_req_done(req);
331 }
332
/* Collect the result of a rpc_read_send() request. */
static NTSTATUS rpc_read_recv(struct async_req *req)
{
        return async_req_simple_recv(req);
}
337
/* Per-request state for an async rpc_write_send() operation. */
struct rpc_write_state {
        struct event_context *ev;      /* event context driving the I/O */
        struct rpc_pipe_client *cli;   /* pipe being written to */
        const char *data;              /* caller-supplied source buffer */
        size_t size;                   /* total number of bytes to write */
        size_t num_written;            /* bytes sent so far */
};
345
346 static void rpc_write_np_done(struct async_req *subreq);
347 static void rpc_write_sock_done(struct async_req *subreq);
348
/*
 * Kick off an async write of exactly "size" bytes from "data", choosing
 * the write primitive by the client's transport.  Returns NULL on
 * immediate failure, otherwise an async request to be completed via
 * rpc_write_recv().
 */
static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
                                        struct event_context *ev,
                                        struct rpc_pipe_client *cli,
                                        const char *data, size_t size)
{
        struct async_req *result, *subreq;
        struct rpc_write_state *state;

        if (!async_req_setup(mem_ctx, &result, &state,
                             struct rpc_write_state)) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->data = data;
        state->size = size;
        state->num_written = 0;

        DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));

        if (cli->transport_type == NCACN_NP) {
                subreq = cli_write_andx_send(
                        state, ev, cli->trans.np.cli,
                        cli->trans.np.fnum, 8, /* 8 means message mode. */
                        (uint8_t *)data, 0, size);
                if (subreq == NULL) {
                        DEBUG(10, ("cli_write_andx_send failed\n"));
                        goto fail;
                }
                subreq->async.fn = rpc_write_np_done;
                subreq->async.priv = result;
                return result;
        }

        if ((cli->transport_type == NCACN_IP_TCP)
            || (cli->transport_type == NCACN_UNIX_STREAM)) {
                /* Socket transport: write straight to the fd. */
                subreq = sendall_send(state, ev, cli->trans.sock.fd,
                                      data, size, 0);
                if (subreq == NULL) {
                        DEBUG(10, ("sendall_send failed\n"));
                        goto fail;
                }
                subreq->async.fn = rpc_write_sock_done;
                subreq->async.priv = result;
                return result;
        }

        /* Unknown transport type: post the error to the caller. */
        if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
                return result;
        }
 fail:
        TALLOC_FREE(result);
        return NULL;
}
403
/*
 * Completion callback for one SMBwriteX chunk on a named pipe: account
 * for what was written and, if short, issue another write for the rest.
 */
static void rpc_write_np_done(struct async_req *subreq)
{
        struct async_req *req = talloc_get_type_abort(
                subreq->async.priv, struct async_req);
        struct rpc_write_state *state = talloc_get_type_abort(
                req->private_data, struct rpc_write_state);
        NTSTATUS status;
        size_t written;

        status = cli_write_andx_recv(subreq, &written);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                async_req_error(req, status);
                return;
        }

        state->num_written += written;

        if (state->num_written == state->size) {
                /* Everything went out - done. */
                async_req_done(req);
                return;
        }

        /* Short write: send the remaining bytes (8 = message mode). */
        subreq = cli_write_andx_send(
                state, state->ev, state->cli->trans.np.cli,
                state->cli->trans.np.fnum, 8,
                (uint8_t *)(state->data + state->num_written),
                0, state->size - state->num_written);

        if (async_req_nomem(subreq, req)) {
                return;
        }

        subreq->async.fn = rpc_write_np_done;
        subreq->async.priv = req;
}
440
441 static void rpc_write_sock_done(struct async_req *subreq)
442 {
443         struct async_req *req = talloc_get_type_abort(
444                 subreq->async.priv, struct async_req);
445         NTSTATUS status;
446
447         status = sendall_recv(subreq);
448         TALLOC_FREE(subreq);
449         if (!NT_STATUS_IS_OK(status)) {
450                 async_req_error(req, status);
451                 return;
452         }
453
454         async_req_done(req);
455 }
456
/* Collect the result of a rpc_write_send() request. */
static NTSTATUS rpc_write_recv(struct async_req *req)
{
        return async_req_simple_recv(req);
}
461
462
463 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
464                                  struct rpc_hdr_info *prhdr,
465                                  prs_struct *pdu)
466 {
467         /*
468          * This next call sets the endian bit correctly in current_pdu. We
469          * will propagate this to rbuf later.
470          */
471
472         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
473                 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
474                 return NT_STATUS_BUFFER_TOO_SMALL;
475         }
476
477         if (prhdr->frag_len > cli->max_recv_frag) {
478                 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
479                           " we only allow %d\n", (int)prhdr->frag_len,
480                           (int)cli->max_recv_frag));
481                 return NT_STATUS_BUFFER_TOO_SMALL;
482         }
483
484         return NT_STATUS_OK;
485 }
486
487 /****************************************************************************
488  Try and get a PDU's worth of data from current_pdu. If not, then read more
489  from the wire.
490  ****************************************************************************/
491
/* Per-request state for an async get_complete_frag_send() operation. */
struct get_complete_frag_state {
        struct event_context *ev;     /* event context driving the reads */
        struct rpc_pipe_client *cli;  /* pipe being read from */
        struct rpc_hdr_info *prhdr;   /* parsed fragment header (output) */
        prs_struct *pdu;              /* buffer accumulating the fragment */
};
498
499 static void get_complete_frag_got_header(struct async_req *subreq);
500 static void get_complete_frag_got_rest(struct async_req *subreq);
501
/*
 * Ensure "pdu" contains one complete RPC fragment.  Two phases: first
 * read up to RPC_HEADER_LEN bytes to learn frag_len from the header,
 * then read the remainder of the fragment.  Either phase is skipped if
 * the buffer already holds enough data.
 */
static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
                                               struct event_context *ev,
                                               struct rpc_pipe_client *cli,
                                               struct rpc_hdr_info *prhdr,
                                               prs_struct *pdu)
{
        struct async_req *result, *subreq;
        struct get_complete_frag_state *state;
        uint32_t pdu_len;
        NTSTATUS status;

        if (!async_req_setup(mem_ctx, &result, &state,
                             struct get_complete_frag_state)) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->prhdr = prhdr;
        state->pdu = pdu;

        pdu_len = prs_data_size(pdu);
        if (pdu_len < RPC_HEADER_LEN) {
                /* Phase 1: not even the header is complete yet. */
                if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq = rpc_read_send(state, state->ev, state->cli,
                                       prs_data_p(state->pdu) + pdu_len,
                                       RPC_HEADER_LEN - pdu_len);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq->async.fn = get_complete_frag_got_header;
                subreq->async.priv = result;
                return result;
        }

        /* Header already present: parse it to learn frag_len. */
        status = parse_rpc_header(cli, prhdr, pdu);
        if (!NT_STATUS_IS_OK(status)) {
                goto post_status;
        }

        /*
         * Ensure we have frag_len bytes of data.
         */
        if (pdu_len < prhdr->frag_len) {
                /* Phase 2: read the rest of the fragment body. */
                if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq = rpc_read_send(state, state->ev, state->cli,
                                       prs_data_p(pdu) + pdu_len,
                                       prhdr->frag_len - pdu_len);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq->async.fn = get_complete_frag_got_rest;
                subreq->async.priv = result;
                return result;
        }

        /* The full fragment was already buffered - complete immediately. */
        status = NT_STATUS_OK;
 post_status:
        if (async_post_status(result, ev, status)) {
                return result;
        }
        TALLOC_FREE(result);
        return NULL;
}
573
/*
 * Phase-1 completion: the RPC header is now fully in state->pdu.  Parse
 * it, grow the buffer to frag_len and read the fragment body.
 */
static void get_complete_frag_got_header(struct async_req *subreq)
{
        struct async_req *req = talloc_get_type_abort(
                subreq->async.priv, struct async_req);
        struct get_complete_frag_state *state = talloc_get_type_abort(
                req->private_data, struct get_complete_frag_state);
        NTSTATUS status;

        status = rpc_read_recv(subreq);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                async_req_error(req, status);
                return;
        }

        status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
        if (!NT_STATUS_IS_OK(status)) {
                async_req_error(req, status);
                return;
        }

        if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
                async_req_error(req, NT_STATUS_NO_MEMORY);
                return;
        }

        /*
         * We're here in this piece of code because we've read exactly
         * RPC_HEADER_LEN bytes into state->pdu.
         */

        subreq = rpc_read_send(state, state->ev, state->cli,
                               prs_data_p(state->pdu) + RPC_HEADER_LEN,
                               state->prhdr->frag_len - RPC_HEADER_LEN);
        if (async_req_nomem(subreq, req)) {
                return;
        }
        subreq->async.fn = get_complete_frag_got_rest;
        subreq->async.priv = req;
}
614
615 static void get_complete_frag_got_rest(struct async_req *subreq)
616 {
617         struct async_req *req = talloc_get_type_abort(
618                 subreq->async.priv, struct async_req);
619         NTSTATUS status;
620
621         status = rpc_read_recv(subreq);
622         TALLOC_FREE(subreq);
623         if (!NT_STATUS_IS_OK(status)) {
624                 async_req_error(req, status);
625                 return;
626         }
627         async_req_done(req);
628 }
629
/* Collect the result of a get_complete_frag_send() request. */
static NTSTATUS get_complete_frag_recv(struct async_req *req)
{
        return async_req_simple_recv(req);
}
634
635 /****************************************************************************
636  NTLMSSP specific sign/seal.
637  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
638  In fact I should probably abstract these into identical pieces of code... JRA.
639  ****************************************************************************/
640
641 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
642                                 prs_struct *current_pdu,
643                                 uint8 *p_ss_padding_len)
644 {
645         RPC_HDR_AUTH auth_info;
646         uint32 save_offset = prs_offset(current_pdu);
647         uint32 auth_len = prhdr->auth_len;
648         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
649         unsigned char *data = NULL;
650         size_t data_len;
651         unsigned char *full_packet_data = NULL;
652         size_t full_packet_data_len;
653         DATA_BLOB auth_blob;
654         NTSTATUS status;
655
656         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
657             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
658                 return NT_STATUS_OK;
659         }
660
661         if (!ntlmssp_state) {
662                 return NT_STATUS_INVALID_PARAMETER;
663         }
664
665         /* Ensure there's enough data for an authenticated response. */
666         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
667                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
668                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
669                         (unsigned int)auth_len ));
670                 return NT_STATUS_BUFFER_TOO_SMALL;
671         }
672
673         /*
674          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
675          * after the RPC header.
676          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
677          * functions as NTLMv2 checks the rpc headers also.
678          */
679
680         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
681         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
682
683         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
684         full_packet_data_len = prhdr->frag_len - auth_len;
685
686         /* Pull the auth header and the following data into a blob. */
687         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
688                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
689                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
690                 return NT_STATUS_BUFFER_TOO_SMALL;
691         }
692
693         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
694                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
695                 return NT_STATUS_BUFFER_TOO_SMALL;
696         }
697
698         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
699         auth_blob.length = auth_len;
700
701         switch (cli->auth->auth_level) {
702                 case PIPE_AUTH_LEVEL_PRIVACY:
703                         /* Data is encrypted. */
704                         status = ntlmssp_unseal_packet(ntlmssp_state,
705                                                         data, data_len,
706                                                         full_packet_data,
707                                                         full_packet_data_len,
708                                                         &auth_blob);
709                         if (!NT_STATUS_IS_OK(status)) {
710                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
711                                         "packet from %s. Error was %s.\n",
712                                         rpccli_pipe_txt(debug_ctx(), cli),
713                                         nt_errstr(status) ));
714                                 return status;
715                         }
716                         break;
717                 case PIPE_AUTH_LEVEL_INTEGRITY:
718                         /* Data is signed. */
719                         status = ntlmssp_check_packet(ntlmssp_state,
720                                                         data, data_len,
721                                                         full_packet_data,
722                                                         full_packet_data_len,
723                                                         &auth_blob);
724                         if (!NT_STATUS_IS_OK(status)) {
725                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
726                                         "packet from %s. Error was %s.\n",
727                                         rpccli_pipe_txt(debug_ctx(), cli),
728                                         nt_errstr(status) ));
729                                 return status;
730                         }
731                         break;
732                 default:
733                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
734                                   "auth level %d\n", cli->auth->auth_level));
735                         return NT_STATUS_INVALID_INFO_CLASS;
736         }
737
738         /*
739          * Return the current pointer to the data offset.
740          */
741
742         if(!prs_set_offset(current_pdu, save_offset)) {
743                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
744                         (unsigned int)save_offset ));
745                 return NT_STATUS_BUFFER_TOO_SMALL;
746         }
747
748         /*
749          * Remember the padding length. We must remove it from the real data
750          * stream once the sign/seal is done.
751          */
752
753         *p_ss_padding_len = auth_info.auth_pad_len;
754
755         return NT_STATUS_OK;
756 }
757
758 /****************************************************************************
759  schannel specific sign/seal.
760  ****************************************************************************/
761
762 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
763                                 prs_struct *current_pdu,
764                                 uint8 *p_ss_padding_len)
765 {
766         RPC_HDR_AUTH auth_info;
767         RPC_AUTH_SCHANNEL_CHK schannel_chk;
768         uint32 auth_len = prhdr->auth_len;
769         uint32 save_offset = prs_offset(current_pdu);
770         struct schannel_auth_struct *schannel_auth =
771                 cli->auth->a_u.schannel_auth;
772         uint32 data_len;
773
774         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
775             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
776                 return NT_STATUS_OK;
777         }
778
779         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
780                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
781                 return NT_STATUS_INVALID_PARAMETER;
782         }
783
784         if (!schannel_auth) {
785                 return NT_STATUS_INVALID_PARAMETER;
786         }
787
788         /* Ensure there's enough data for an authenticated response. */
789         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
790                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
791                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
792                         (unsigned int)auth_len ));
793                 return NT_STATUS_INVALID_PARAMETER;
794         }
795
796         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
797
798         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
799                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
800                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
801                 return NT_STATUS_BUFFER_TOO_SMALL;
802         }
803
804         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
805                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
806                 return NT_STATUS_BUFFER_TOO_SMALL;
807         }
808
809         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
810                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
811                         auth_info.auth_type));
812                 return NT_STATUS_BUFFER_TOO_SMALL;
813         }
814
815         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
816                                 &schannel_chk, current_pdu, 0)) {
817                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
818                 return NT_STATUS_BUFFER_TOO_SMALL;
819         }
820
821         if (!schannel_decode(schannel_auth,
822                         cli->auth->auth_level,
823                         SENDER_IS_ACCEPTOR,
824                         &schannel_chk,
825                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
826                         data_len)) {
827                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
828                                 "Connection to %s.\n",
829                                 rpccli_pipe_txt(debug_ctx(), cli)));
830                 return NT_STATUS_INVALID_PARAMETER;
831         }
832
833         /* The sequence number gets incremented on both send and receive. */
834         schannel_auth->seq_num++;
835
836         /*
837          * Return the current pointer to the data offset.
838          */
839
840         if(!prs_set_offset(current_pdu, save_offset)) {
841                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
842                         (unsigned int)save_offset ));
843                 return NT_STATUS_BUFFER_TOO_SMALL;
844         }
845
846         /*
847          * Remember the padding length. We must remove it from the real data
848          * stream once the sign/seal is done.
849          */
850
851         *p_ss_padding_len = auth_info.auth_pad_len;
852
853         return NT_STATUS_OK;
854 }
855
856 /****************************************************************************
857  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
858  ****************************************************************************/
859
860 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
861                                 prs_struct *current_pdu,
862                                 uint8 *p_ss_padding_len)
863 {
864         NTSTATUS ret = NT_STATUS_OK;
865
866         /* Paranioa checks for auth_len. */
867         if (prhdr->auth_len) {
868                 if (prhdr->auth_len > prhdr->frag_len) {
869                         return NT_STATUS_INVALID_PARAMETER;
870                 }
871
872                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
873                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
874                         /* Integer wrap attempt. */
875                         return NT_STATUS_INVALID_PARAMETER;
876                 }
877         }
878
879         /*
880          * Now we have a complete RPC request PDU fragment, try and verify any auth data.
881          */
882
883         switch(cli->auth->auth_type) {
884                 case PIPE_AUTH_TYPE_NONE:
885                         if (prhdr->auth_len) {
886                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
887                                           "Connection to %s - got non-zero "
888                                           "auth len %u.\n",
889                                         rpccli_pipe_txt(debug_ctx(), cli),
890                                         (unsigned int)prhdr->auth_len ));
891                                 return NT_STATUS_INVALID_PARAMETER;
892                         }
893                         break;
894
895                 case PIPE_AUTH_TYPE_NTLMSSP:
896                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
897                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
898                         if (!NT_STATUS_IS_OK(ret)) {
899                                 return ret;
900                         }
901                         break;
902
903                 case PIPE_AUTH_TYPE_SCHANNEL:
904                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
905                         if (!NT_STATUS_IS_OK(ret)) {
906                                 return ret;
907                         }
908                         break;
909
910                 case PIPE_AUTH_TYPE_KRB5:
911                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
912                 default:
913                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
914                                   "to %s - unknown internal auth type %u.\n",
915                                   rpccli_pipe_txt(debug_ctx(), cli),
916                                   cli->auth->auth_type ));
917                         return NT_STATUS_INVALID_INFO_CLASS;
918         }
919
920         return NT_STATUS_OK;
921 }
922
923 /****************************************************************************
924  Do basic authentication checks on an incoming pdu.
925  ****************************************************************************/
926
927 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
928                         prs_struct *current_pdu,
929                         uint8 expected_pkt_type,
930                         char **ppdata,
931                         uint32 *pdata_len,
932                         prs_struct *return_data)
933 {
934
935         NTSTATUS ret = NT_STATUS_OK;
936         uint32 current_pdu_len = prs_data_size(current_pdu);
937
938         if (current_pdu_len != prhdr->frag_len) {
939                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
940                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
941                 return NT_STATUS_INVALID_PARAMETER;
942         }
943
944         /*
945          * Point the return values at the real data including the RPC
946          * header. Just in case the caller wants it.
947          */
948         *ppdata = prs_data_p(current_pdu);
949         *pdata_len = current_pdu_len;
950
951         /* Ensure we have the correct type. */
952         switch (prhdr->pkt_type) {
953                 case RPC_ALTCONTRESP:
954                 case RPC_BINDACK:
955
956                         /* Alter context and bind ack share the same packet definitions. */
957                         break;
958
959
960                 case RPC_RESPONSE:
961                 {
962                         RPC_HDR_RESP rhdr_resp;
963                         uint8 ss_padding_len = 0;
964
965                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
966                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
967                                 return NT_STATUS_BUFFER_TOO_SMALL;
968                         }
969
970                         /* Here's where we deal with incoming sign/seal. */
971                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
972                                         current_pdu, &ss_padding_len);
973                         if (!NT_STATUS_IS_OK(ret)) {
974                                 return ret;
975                         }
976
977                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
978                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
979
980                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
981                                 return NT_STATUS_BUFFER_TOO_SMALL;
982                         }
983
984                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
985
986                         /* Remember to remove the auth footer. */
987                         if (prhdr->auth_len) {
988                                 /* We've already done integer wrap tests on auth_len in
989                                         cli_pipe_validate_rpc_response(). */
990                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
991                                         return NT_STATUS_BUFFER_TOO_SMALL;
992                                 }
993                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
994                         }
995
996                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
997                                 current_pdu_len, *pdata_len, ss_padding_len ));
998
999                         /*
1000                          * If this is the first reply, and the allocation hint is reasonably, try and
1001                          * set up the return_data parse_struct to the correct size.
1002                          */
1003
1004                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
1005                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
1006                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
1007                                                 "too large to allocate\n",
1008                                                 (unsigned int)rhdr_resp.alloc_hint ));
1009                                         return NT_STATUS_NO_MEMORY;
1010                                 }
1011                         }
1012
1013                         break;
1014                 }
1015
1016                 case RPC_BINDNACK:
1017                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
1018                                   "received from %s!\n",
1019                                   rpccli_pipe_txt(debug_ctx(), cli)));
1020                         /* Use this for now... */
1021                         return NT_STATUS_NETWORK_ACCESS_DENIED;
1022
1023                 case RPC_FAULT:
1024                 {
1025                         RPC_HDR_RESP rhdr_resp;
1026                         RPC_HDR_FAULT fault_resp;
1027
1028                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
1029                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
1030                                 return NT_STATUS_BUFFER_TOO_SMALL;
1031                         }
1032
1033                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
1034                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
1035                                 return NT_STATUS_BUFFER_TOO_SMALL;
1036                         }
1037
1038                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
1039                                   "code %s received from %s!\n",
1040                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
1041                                 rpccli_pipe_txt(debug_ctx(), cli)));
1042                         if (NT_STATUS_IS_OK(fault_resp.status)) {
1043                                 return NT_STATUS_UNSUCCESSFUL;
1044                         } else {
1045                                 return fault_resp.status;
1046                         }
1047                 }
1048
1049                 default:
1050                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
1051                                 "from %s!\n",
1052                                 (unsigned int)prhdr->pkt_type,
1053                                 rpccli_pipe_txt(debug_ctx(), cli)));
1054                         return NT_STATUS_INVALID_INFO_CLASS;
1055         }
1056
1057         if (prhdr->pkt_type != expected_pkt_type) {
1058                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
1059                           "got an unexpected RPC packet type - %u, not %u\n",
1060                         rpccli_pipe_txt(debug_ctx(), cli),
1061                         prhdr->pkt_type,
1062                         expected_pkt_type));
1063                 return NT_STATUS_INVALID_INFO_CLASS;
1064         }
1065
1066         /* Do this just before return - we don't want to modify any rpc header
1067            data before now as we may have needed to do cryptographic actions on
1068            it before. */
1069
1070         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
1071                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
1072                         "setting fragment first/last ON.\n"));
1073                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
1074         }
1075
1076         return NT_STATUS_OK;
1077 }
1078
1079 /****************************************************************************
1080  Ensure we eat the just processed pdu from the current_pdu prs_struct.
1081  Normally the frag_len and buffer size will match, but on the first trans
1082  reply there is a theoretical chance that buffer size > frag_len, so we must
1083  deal with that.
1084  ****************************************************************************/
1085
1086 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
1087 {
1088         uint32 current_pdu_len = prs_data_size(current_pdu);
1089
1090         if (current_pdu_len < prhdr->frag_len) {
1091                 return NT_STATUS_BUFFER_TOO_SMALL;
1092         }
1093
1094         /* Common case. */
1095         if (current_pdu_len == (uint32)prhdr->frag_len) {
1096                 prs_mem_free(current_pdu);
1097                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1098                 /* Make current_pdu dynamic with no memory. */
1099                 prs_give_memory(current_pdu, 0, 0, True);
1100                 return NT_STATUS_OK;
1101         }
1102
1103         /*
1104          * Oh no ! More data in buffer than we processed in current pdu.
1105          * Cheat. Move the data down and shrink the buffer.
1106          */
1107
1108         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1109                         current_pdu_len - prhdr->frag_len);
1110
1111         /* Remember to set the read offset back to zero. */
1112         prs_set_offset(current_pdu, 0);
1113
1114         /* Shrink the buffer. */
1115         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1116                 return NT_STATUS_BUFFER_TOO_SMALL;
1117         }
1118
1119         return NT_STATUS_OK;
1120 }
1121
1122 /****************************************************************************
1123  Call a remote api on an arbitrary pipe.  takes param, data and setup buffers.
1124 ****************************************************************************/
1125
1126 struct cli_api_pipe_state {
1127         struct event_context *ev;
1128         struct rpc_pipe_client *cli;
1129         uint32_t max_rdata_len;
1130         uint8_t *rdata;
1131         uint32_t rdata_len;
1132 };
1133
1134 static void cli_api_pipe_np_trans_done(struct async_req *subreq);
1135 static void cli_api_pipe_sock_send_done(struct async_req *subreq);
1136 static void cli_api_pipe_sock_read_done(struct async_req *subreq);
1137
/****************************************************************************
 Async send of a raw rpc request over whatever transport the pipe uses.
 For NCACN_NP a single SMBtrans carries request and reply; for the socket
 transports the data is sent and then RPC_HEADER_LEN bytes of the reply
 are read. Collect the reply with cli_api_pipe_recv().
 ****************************************************************************/

static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct rpc_pipe_client *cli,
					   uint8_t *data, size_t data_len,
					   uint32_t max_rdata_len)
{
	struct async_req *result, *subreq;
	struct cli_api_pipe_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct cli_api_pipe_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->max_rdata_len = max_rdata_len;

	if (state->max_rdata_len < RPC_HEADER_LEN) {
		/*
		 * For a RPC reply we always need at least RPC_HEADER_LEN
		 * bytes. We check this here because we will receive
		 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	if (cli->transport_type == NCACN_NP) {

		/* Named pipe: issue an SMBtrans against the pipe fnum. */
		uint16_t setup[2];
		SSVAL(setup+0, 0, TRANSACT_DCERPCCMD);
		SSVAL(setup+1, 0, cli->trans.np.fnum);

		subreq = cli_trans_send(
			state, ev, cli->trans.np.cli, SMBtrans,
			"\\PIPE\\", 0, 0, 0, setup, 2, 0,
			NULL, 0, 0, data, data_len, max_rdata_len);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = cli_api_pipe_np_trans_done;
		subreq->async.priv = result;
		return result;
	}

	if ((cli->transport_type == NCACN_IP_TCP)
	    || (cli->transport_type == NCACN_UNIX_STREAM)) {
		/* Socket transport: push the request bytes out first. */
		subreq = sendall_send(state, ev, cli->trans.sock.fd,
				      data, data_len, 0);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = cli_api_pipe_sock_send_done;
		subreq->async.priv = result;
		return result;
	}

	/* Unknown transport type. */
	status = NT_STATUS_INVALID_PARAMETER;

 post_status:
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
1207
1208 static void cli_api_pipe_np_trans_done(struct async_req *subreq)
1209 {
1210         struct async_req *req = talloc_get_type_abort(
1211                 subreq->async.priv, struct async_req);
1212         struct cli_api_pipe_state *state = talloc_get_type_abort(
1213                 req->private_data, struct cli_api_pipe_state);
1214         NTSTATUS status;
1215
1216         status = cli_trans_recv(subreq, state, NULL, NULL, NULL, NULL,
1217                                 &state->rdata, &state->rdata_len);
1218         TALLOC_FREE(subreq);
1219         if (!NT_STATUS_IS_OK(status)) {
1220                 async_req_error(req, status);
1221                 return;
1222         }
1223         async_req_done(req);
1224 }
1225
/*
 * The request bytes went out on the socket. Allocate a buffer for the
 * fixed-size RPC header and start reading it; completion continues in
 * cli_api_pipe_sock_read_done().
 */

static void cli_api_pipe_sock_send_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct cli_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct cli_api_pipe_state);
	NTSTATUS status;

	status = sendall_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	/* Only the RPC header is read here; the rest of the pdu is
	   fetched later by the fragment reassembly code. */
	state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
	if (async_req_nomem(state->rdata, req)) {
		return;
	}
	state->rdata_len = RPC_HEADER_LEN;

	subreq = recvall_send(state, state->ev, state->cli->trans.sock.fd,
			      state->rdata, RPC_HEADER_LEN, 0);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = cli_api_pipe_sock_read_done;
	subreq->async.priv = req;
}
1255
1256 static void cli_api_pipe_sock_read_done(struct async_req *subreq)
1257 {
1258         struct async_req *req = talloc_get_type_abort(
1259                 subreq->async.priv, struct async_req);
1260         NTSTATUS status;
1261
1262         status = recvall_recv(subreq);
1263         TALLOC_FREE(subreq);
1264         if (!NT_STATUS_IS_OK(status)) {
1265                 async_req_error(req, status);
1266                 return;
1267         }
1268         async_req_done(req);
1269 }
1270
1271 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1272                                   uint8_t **prdata, uint32_t *prdata_len)
1273 {
1274         struct cli_api_pipe_state *state = talloc_get_type_abort(
1275                 req->private_data, struct cli_api_pipe_state);
1276         NTSTATUS status;
1277
1278         if (async_req_is_error(req, &status)) {
1279                 return status;
1280         }
1281
1282         *prdata = talloc_move(mem_ctx, &state->rdata);
1283         *prdata_len = state->rdata_len;
1284         return NT_STATUS_OK;
1285 }
1286
1287 /****************************************************************************
1288  Send data on an rpc pipe via trans. The prs_struct data must be the last
1289  pdu fragment of an NDR data stream.
1290
1291  Receive response data from an rpc pipe, which may be large...
1292
1293  Read the first fragment: unfortunately have to use SMBtrans for the first
1294  bit, then SMBreadX for subsequent bits.
1295
1296  If first fragment received also wasn't the last fragment, continue
1297  getting fragments until we _do_ receive the last fragment.
1298
1299  Request/Response PDU's look like the following...
1300
1301  |<------------------PDU len----------------------------------------------->|
1302  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1303
1304  +------------+-----------------+-------------+---------------+-------------+
1305  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1306  +------------+-----------------+-------------+---------------+-------------+
1307
1308  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1309  signing & sealing being negotiated.
1310
1311  ****************************************************************************/
1312
/* State for assembling a multi-fragment RPC reply into one pdu. */
struct rpc_api_pipe_state {
	struct event_context *ev;	/* Event loop driving the async calls. */
	struct rpc_pipe_client *cli;	/* Pipe the exchange runs on. */
	uint8_t expected_pkt_type;	/* Packet type the caller expects back. */

	prs_struct incoming_frag;	/* The fragment currently being read. */
	struct rpc_hdr_info rhdr;	/* Header of the current fragment. */

	prs_struct incoming_pdu;	/* Incoming reply */
	uint32_t incoming_pdu_offset;	/* Bytes of reply data copied so far. */
};
1324
1325 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1326 {
1327         prs_mem_free(&state->incoming_frag);
1328         prs_mem_free(&state->incoming_pdu);
1329         return 0;
1330 }
1331
1332 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1333 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1334
/****************************************************************************
 Async entry point for a full RPC round trip: send the marshalled pdu in
 'data' and reassemble the (possibly fragmented) reply. Collect the result
 with rpc_api_pipe_recv().
 ****************************************************************************/

static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct rpc_pipe_client *cli,
					   prs_struct *data, /* Outgoing PDU */
					   uint8_t expected_pkt_type)
{
	struct async_req *result, *subreq;
	struct rpc_api_pipe_state *state;
	uint16_t max_recv_frag;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_api_pipe_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->expected_pkt_type = expected_pkt_type;
	state->incoming_pdu_offset = 0;

	prs_init_empty(&state->incoming_frag, state, UNMARSHALL);

	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
	/* Make incoming_pdu dynamic with no memory. */
	prs_give_memory(&state->incoming_pdu, 0, 0, true);

	/* Ensure the prs buffers are freed when the state goes away. */
	talloc_set_destructor(state, rpc_api_pipe_state_destructor);

	/*
	 * Ensure we're not sending too much.
	 */
	if (prs_offset(data) > cli->max_xmit_frag) {
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));

	max_recv_frag = cli->max_recv_frag;

#ifdef DEVELOPER
	/* Force small random receive fragments in developer builds to
	   exercise the fragment reassembly code. */
	max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
#endif

	subreq = cli_api_pipe_send(state, ev, cli, (uint8_t *)prs_data_p(data),
				   prs_offset(data), max_recv_frag);
	if (subreq == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}
	subreq->async.fn = rpc_api_pipe_trans_done;
	subreq->async.priv = result;
	return result;

 post_status:
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
1396
/*
 * The initial send (and first chunk of reply) completed. Move the reply
 * bytes into incoming_frag and kick off reading until we hold at least
 * one complete fragment.
 */

static void rpc_api_pipe_trans_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;
	uint8_t *rdata = NULL;
	uint32_t rdata_len = 0;
	char *rdata_copy;

	status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
		async_req_error(req, status);
		return;
	}

	/* An empty reply is treated as success with no data. */
	if (rdata == NULL) {
		DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
			 rpccli_pipe_txt(debug_ctx(), state->cli)));
		async_req_done(req);
		return;
	}

	/*
	 * Give the memory received from cli_trans as dynamic to the current
	 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
	 * :-(
	 */
	rdata_copy = (char *)memdup(rdata, rdata_len);
	TALLOC_FREE(rdata);
	if (async_req_nomem(rdata_copy, req)) {
		return;
	}
	prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);

	/* Ensure we have enough data for a pdu. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_api_pipe_got_pdu;
	subreq->async.priv = req;
}
1444
/*
 * One complete fragment is in incoming_frag. Validate it, append its data
 * portion to incoming_pdu, and either finish (last fragment) or loop back
 * to fetch the next fragment.
 */

static void rpc_api_pipe_got_pdu(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;
	char *rdata = NULL;
	uint32_t rdata_len = 0;

	status = get_complete_frag_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("get_complete_frag failed: %s\n",
			  nt_errstr(status)));
		async_req_error(req, status);
		return;
	}

	/* Auth-check/unseal the fragment; rdata/rdata_len get pointed at
	   its NDR payload. */
	status = cli_pipe_validate_current_pdu(
		state->cli, &state->rhdr, &state->incoming_frag,
		state->expected_pkt_type, &rdata, &rdata_len,
		&state->incoming_pdu);

	DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
		  (unsigned)prs_data_size(&state->incoming_frag),
		  (unsigned)state->incoming_pdu_offset,
		  nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	if ((state->rhdr.flags & RPC_FLG_FIRST)
	    && (state->rhdr.pack_type[0] == 0)) {
		/*
		 * Set the data type correctly for big-endian data on the
		 * first packet.
		 */
		DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
			  "big-endian.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli)));
		prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
	}
	/*
	 * Check endianness on subsequent packets.
	 */
	if (state->incoming_frag.bigendian_data
	    != state->incoming_pdu.bigendian_data) {
		DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
			 "%s\n",
			 state->incoming_pdu.bigendian_data?"big":"little",
			 state->incoming_frag.bigendian_data?"big":"little"));
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	/* Now copy the data portion out of the pdu into rbuf. */
	if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
		async_req_error(req, NT_STATUS_NO_MEMORY);
		return;
	}

	memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
	       rdata, (size_t)rdata_len);
	state->incoming_pdu_offset += rdata_len;

	/* Drop the consumed fragment, keeping any bytes that belong to
	   the next one. */
	status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
					    &state->incoming_frag);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	if (state->rhdr.flags & RPC_FLG_LAST) {
		DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  (unsigned)prs_data_size(&state->incoming_pdu)));
		async_req_done(req);
		return;
	}

	/* Not the last fragment - go get the next one. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_api_pipe_got_pdu;
	subreq->async.priv = req;
}
1536
1537 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1538                                   prs_struct *reply_pdu)
1539 {
1540         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1541                 req->private_data, struct rpc_api_pipe_state);
1542         NTSTATUS status;
1543
1544         if (async_req_is_error(req, &status)) {
1545                 return status;
1546         }
1547
1548         *reply_pdu = state->incoming_pdu;
1549         reply_pdu->mem_ctx = mem_ctx;
1550
1551         /*
1552          * Prevent state->incoming_pdu from being freed in
1553          * rpc_api_pipe_state_destructor()
1554          */
1555         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1556
1557         return NT_STATUS_OK;
1558 }
1559
1560 /*******************************************************************
1561  Creates krb5 auth bind.
1562  ********************************************************************/
1563
static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
						enum pipe_auth_level auth_level,
						RPC_HDR_AUTH *pauth_out,
						prs_struct *auth_data)
{
#ifdef HAVE_KRB5
	int ret;
	struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
	DATA_BLOB tkt = data_blob_null;
	DATA_BLOB tkt_wrapped = data_blob_null;

	/* We may change the pad length before marshalling. */
	init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);

	DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
		a->service_principal ));

	/* Create the ticket for the service principal and return it in a gss-api wrapped blob. */

	/* Mutual auth requested so the server proves its identity too. */
	ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
			&a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);

	if (ret) {
		DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
			"failed with %s\n",
			a->service_principal,
			error_message(ret) ));

		/* Free the caller's auth_data buffer on error, matching the
		 * other auth bind helpers in this file; the caller frees it
		 * again, which the existing error paths already rely on. */
		data_blob_free(&tkt);
		prs_mem_free(auth_data);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* wrap that up in a nice GSS-API wrapping */
	tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);

	/* The raw ticket is no longer needed once it has been wrapped. */
	data_blob_free(&tkt);

	/* Auth len in the rpc header doesn't include auth_header. */
	if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
		data_blob_free(&tkt_wrapped);
		prs_mem_free(auth_data);
		return NT_STATUS_NO_MEMORY;
	}

	DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
	dump_data(5, tkt_wrapped.data, tkt_wrapped.length);

	data_blob_free(&tkt_wrapped);
	return NT_STATUS_OK;
#else
	/* Built without Kerberos support - a krb5 bind cannot succeed. */
	return NT_STATUS_INVALID_PARAMETER;
#endif
}
1618
1619 /*******************************************************************
1620  Creates SPNEGO NTLMSSP auth bind.
1621  ********************************************************************/
1622
1623 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1624                                                 enum pipe_auth_level auth_level,
1625                                                 RPC_HDR_AUTH *pauth_out,
1626                                                 prs_struct *auth_data)
1627 {
1628         NTSTATUS nt_status;
1629         DATA_BLOB null_blob = data_blob_null;
1630         DATA_BLOB request = data_blob_null;
1631         DATA_BLOB spnego_msg = data_blob_null;
1632
1633         /* We may change the pad length before marshalling. */
1634         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1635
1636         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1637         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1638                                         null_blob,
1639                                         &request);
1640
1641         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1642                 data_blob_free(&request);
1643                 prs_mem_free(auth_data);
1644                 return nt_status;
1645         }
1646
1647         /* Wrap this in SPNEGO. */
1648         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1649
1650         data_blob_free(&request);
1651
1652         /* Auth len in the rpc header doesn't include auth_header. */
1653         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1654                 data_blob_free(&spnego_msg);
1655                 prs_mem_free(auth_data);
1656                 return NT_STATUS_NO_MEMORY;
1657         }
1658
1659         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1660         dump_data(5, spnego_msg.data, spnego_msg.length);
1661
1662         data_blob_free(&spnego_msg);
1663         return NT_STATUS_OK;
1664 }
1665
1666 /*******************************************************************
1667  Creates NTLMSSP auth bind.
1668  ********************************************************************/
1669
1670 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1671                                                 enum pipe_auth_level auth_level,
1672                                                 RPC_HDR_AUTH *pauth_out,
1673                                                 prs_struct *auth_data)
1674 {
1675         NTSTATUS nt_status;
1676         DATA_BLOB null_blob = data_blob_null;
1677         DATA_BLOB request = data_blob_null;
1678
1679         /* We may change the pad length before marshalling. */
1680         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1681
1682         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1683         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1684                                         null_blob,
1685                                         &request);
1686
1687         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1688                 data_blob_free(&request);
1689                 prs_mem_free(auth_data);
1690                 return nt_status;
1691         }
1692
1693         /* Auth len in the rpc header doesn't include auth_header. */
1694         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1695                 data_blob_free(&request);
1696                 prs_mem_free(auth_data);
1697                 return NT_STATUS_NO_MEMORY;
1698         }
1699
1700         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1701         dump_data(5, request.data, request.length);
1702
1703         data_blob_free(&request);
1704         return NT_STATUS_OK;
1705 }
1706
1707 /*******************************************************************
1708  Creates schannel auth bind.
1709  ********************************************************************/
1710
1711 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1712                                                 enum pipe_auth_level auth_level,
1713                                                 RPC_HDR_AUTH *pauth_out,
1714                                                 prs_struct *auth_data)
1715 {
1716         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1717
1718         /* We may change the pad length before marshalling. */
1719         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1720
1721         /* Use lp_workgroup() if domain not specified */
1722
1723         if (!cli->auth->domain || !cli->auth->domain[0]) {
1724                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1725                 if (cli->auth->domain == NULL) {
1726                         return NT_STATUS_NO_MEMORY;
1727                 }
1728         }
1729
1730         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1731                                    global_myname());
1732
1733         /*
1734          * Now marshall the data into the auth parse_struct.
1735          */
1736
1737         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1738                                        &schannel_neg, auth_data, 0)) {
1739                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1740                 prs_mem_free(auth_data);
1741                 return NT_STATUS_NO_MEMORY;
1742         }
1743
1744         return NT_STATUS_OK;
1745 }
1746
1747 /*******************************************************************
1748  Creates the internals of a DCE/RPC bind request or alter context PDU.
1749  ********************************************************************/
1750
static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
						prs_struct *rpc_out, 
						uint32 rpc_call_id,
						const RPC_IFACE *abstract,
						const RPC_IFACE *transfer,
						RPC_HDR_AUTH *phdr_auth,
						prs_struct *pauth_info)
{
	RPC_HDR hdr;
	RPC_HDR_RB hdr_rb;
	RPC_CONTEXT rpc_ctx;
	/* Length of the auth data already marshalled into pauth_info. */
	uint16 auth_len = prs_offset(pauth_info);
	uint8 ss_padding_len = 0;
	uint16 frag_len = 0;

	/* create the RPC context. */
	init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);

	/* create the bind request RPC_HDR_RB */
	init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);

	/* Start building the frag length. */
	frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);

	/* Do we need to pad ? The auth trailer must start on an
	   8-byte boundary, so pad the data portion up to that. */
	if (auth_len) {
		uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
		if (data_len % 8) {
			ss_padding_len = 8 - (data_len % 8);
			/* Record the padding in the auth header so the
			   receiver can strip it again. */
			phdr_auth->auth_pad_len = ss_padding_len;
		}
		frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
	}

	/* Create the request RPC_HDR. One fragment (FIRST|LAST). */
	init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);

	/* Marshall the RPC header */
	if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/* Marshall the bind request data */
	if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Grow the outgoing buffer to store any auth info:
	 * padding, then auth header, then the auth data itself.
	 */

	if(auth_len != 0) {
		if (ss_padding_len) {
			char pad[8];
			memset(pad, '\0', 8);
			if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
				DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
				return NT_STATUS_NO_MEMORY;
			}
		}

		if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
			return NT_STATUS_NO_MEMORY;
		}


		if(!prs_append_prs_data( rpc_out, pauth_info)) {
			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
			return NT_STATUS_NO_MEMORY;
		}
	}

	return NT_STATUS_OK;
}
1828
1829 /*******************************************************************
1830  Creates a DCE/RPC bind request.
1831  ********************************************************************/
1832
1833 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1834                                 prs_struct *rpc_out, 
1835                                 uint32 rpc_call_id,
1836                                 const RPC_IFACE *abstract,
1837                                 const RPC_IFACE *transfer,
1838                                 enum pipe_auth_type auth_type,
1839                                 enum pipe_auth_level auth_level)
1840 {
1841         RPC_HDR_AUTH hdr_auth;
1842         prs_struct auth_info;
1843         NTSTATUS ret = NT_STATUS_OK;
1844
1845         ZERO_STRUCT(hdr_auth);
1846         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1847                 return NT_STATUS_NO_MEMORY;
1848
1849         switch (auth_type) {
1850                 case PIPE_AUTH_TYPE_SCHANNEL:
1851                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1852                         if (!NT_STATUS_IS_OK(ret)) {
1853                                 prs_mem_free(&auth_info);
1854                                 return ret;
1855                         }
1856                         break;
1857
1858                 case PIPE_AUTH_TYPE_NTLMSSP:
1859                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1860                         if (!NT_STATUS_IS_OK(ret)) {
1861                                 prs_mem_free(&auth_info);
1862                                 return ret;
1863                         }
1864                         break;
1865
1866                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1867                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1868                         if (!NT_STATUS_IS_OK(ret)) {
1869                                 prs_mem_free(&auth_info);
1870                                 return ret;
1871                         }
1872                         break;
1873
1874                 case PIPE_AUTH_TYPE_KRB5:
1875                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1876                         if (!NT_STATUS_IS_OK(ret)) {
1877                                 prs_mem_free(&auth_info);
1878                                 return ret;
1879                         }
1880                         break;
1881
1882                 case PIPE_AUTH_TYPE_NONE:
1883                         break;
1884
1885                 default:
1886                         /* "Can't" happen. */
1887                         return NT_STATUS_INVALID_INFO_CLASS;
1888         }
1889
1890         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1891                                                 rpc_out, 
1892                                                 rpc_call_id,
1893                                                 abstract,
1894                                                 transfer,
1895                                                 &hdr_auth,
1896                                                 &auth_info);
1897
1898         prs_mem_free(&auth_info);
1899         return ret;
1900 }
1901
1902 /*******************************************************************
1903  Create and add the NTLMSSP sign/seal auth header and data.
1904  ********************************************************************/
1905
static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
					RPC_HDR *phdr,
					uint32 ss_padding_len,
					prs_struct *outgoing_pdu)
{
	RPC_HDR_AUTH auth_info;
	NTSTATUS status;
	DATA_BLOB auth_blob = data_blob_null;
	/* Everything after the RPC header + request header is the
	   (already padded) data portion that gets signed/sealed. */
	uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;

	if (!cli->auth->a_u.ntlmssp_state) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Init and marshall the auth header. */
	init_rpc_hdr_auth(&auth_info,
			map_pipe_auth_type_to_rpc_auth_type(
				cli->auth->auth_type),
			cli->auth->auth_level,
			ss_padding_len,
			1 /* context id. */);

	if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
		DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
		data_blob_free(&auth_blob);
		return NT_STATUS_NO_MEMORY;
	}

	switch (cli->auth->auth_level) {
		case PIPE_AUTH_LEVEL_PRIVACY:
			/* Data portion is encrypted in place; auth_blob
			   receives the signature to append. The whole PDU
			   (including headers) is covered by the signature. */
			status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
					data_and_pad_len,
					(unsigned char *)prs_data_p(outgoing_pdu),
					(size_t)prs_offset(outgoing_pdu),
					&auth_blob);
			if (!NT_STATUS_IS_OK(status)) {
				data_blob_free(&auth_blob);
				return status;
			}
			break;

		case PIPE_AUTH_LEVEL_INTEGRITY:
			/* Data is signed only - plaintext stays on the wire. */
			status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
					data_and_pad_len,
					(unsigned char *)prs_data_p(outgoing_pdu),
					(size_t)prs_offset(outgoing_pdu),
					&auth_blob);
			if (!NT_STATUS_IS_OK(status)) {
				data_blob_free(&auth_blob);
				return status;
			}
			break;

		default:
			/* Can't happen. */
			smb_panic("bad auth level");
			/* Notreached. */
			return NT_STATUS_INVALID_PARAMETER;
	}

	/* Finally marshall the blob. Only NTLMSSP_SIG_SIZE bytes of the
	   signature are appended to the PDU. */

	if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
		DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
			(unsigned int)NTLMSSP_SIG_SIZE));
		data_blob_free(&auth_blob);
		return NT_STATUS_NO_MEMORY;
	}

	data_blob_free(&auth_blob);
	return NT_STATUS_OK;
}
1982
1983 /*******************************************************************
1984  Create and add the schannel sign/seal auth header and data.
1985  ********************************************************************/
1986
1987 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1988                                         RPC_HDR *phdr,
1989                                         uint32 ss_padding_len,
1990                                         prs_struct *outgoing_pdu)
1991 {
1992         RPC_HDR_AUTH auth_info;
1993         RPC_AUTH_SCHANNEL_CHK verf;
1994         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1995         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1996         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1997
1998         if (!sas) {
1999                 return NT_STATUS_INVALID_PARAMETER;
2000         }
2001
2002         /* Init and marshall the auth header. */
2003         init_rpc_hdr_auth(&auth_info,
2004                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
2005                         cli->auth->auth_level,
2006                         ss_padding_len,
2007                         1 /* context id. */);
2008
2009         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
2010                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
2011                 return NT_STATUS_NO_MEMORY;
2012         }
2013
2014         switch (cli->auth->auth_level) {
2015                 case PIPE_AUTH_LEVEL_PRIVACY:
2016                 case PIPE_AUTH_LEVEL_INTEGRITY:
2017                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
2018                                 sas->seq_num));
2019
2020                         schannel_encode(sas,
2021                                         cli->auth->auth_level,
2022                                         SENDER_IS_INITIATOR,
2023                                         &verf,
2024                                         data_p,
2025                                         data_and_pad_len);
2026
2027                         sas->seq_num++;
2028                         break;
2029
2030                 default:
2031                         /* Can't happen. */
2032                         smb_panic("bad auth level");
2033                         /* Notreached. */
2034                         return NT_STATUS_INVALID_PARAMETER;
2035         }
2036
2037         /* Finally marshall the blob. */
2038         smb_io_rpc_auth_schannel_chk("",
2039                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
2040                         &verf,
2041                         outgoing_pdu,
2042                         0);
2043
2044         return NT_STATUS_OK;
2045 }
2046
2047 /*******************************************************************
2048  Calculate how much data we're going to send in this packet, also
2049  work out any sign/seal padding length.
2050  ********************************************************************/
2051
static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
					uint32 data_left,
					uint16 *p_frag_len,
					uint16 *p_auth_len,
					uint32 *p_ss_padding)
{
	uint32 data_space, data_len;

#ifdef DEVELOPER
	/* In developer builds, randomly halve the amount sent so that
	   multi-fragment code paths get exercised regularly. */
	if ((data_left > 0) && (sys_random() % 2)) {
		data_left = MAX(data_left/2, 1);
	}
#endif

	switch (cli->auth->auth_level) {
		case PIPE_AUTH_LEVEL_NONE:
		case PIPE_AUTH_LEVEL_CONNECT:
			/* No auth trailer - only the two headers eat into
			   the fragment space. */
			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
			data_len = MIN(data_space, data_left);
			*p_ss_padding = 0;
			*p_auth_len = 0;
			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
			return data_len;

		case PIPE_AUTH_LEVEL_INTEGRITY:
		case PIPE_AUTH_LEVEL_PRIVACY:
			/* Treat the same for all authenticated rpc requests. */
			switch(cli->auth->auth_type) {
				case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
				case PIPE_AUTH_TYPE_NTLMSSP:
					*p_auth_len = NTLMSSP_SIG_SIZE;
					break;
				case PIPE_AUTH_TYPE_SCHANNEL:
					*p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
					break;
				default:
					smb_panic("bad auth type");
					break;
			}

			/* Room left after headers, auth header and auth data. */
			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
						RPC_HDR_AUTH_LEN - *p_auth_len;

			data_len = MIN(data_space, data_left);
			*p_ss_padding = 0;
			/* Sign/seal data must be padded to an 8-byte boundary. */
			if (data_len % 8) {
				*p_ss_padding = 8 - (data_len % 8);
			}
			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +		/* Normal headers. */
					data_len + *p_ss_padding +		/* data plus padding. */
					RPC_HDR_AUTH_LEN + *p_auth_len;		/* Auth header and auth data. */
			return data_len;

		default:
			smb_panic("bad auth level");
			/* Notreached. */
			return 0;
	}
}
2111
2112 /*******************************************************************
2113  External interface.
2114  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2115  Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2116  and deals with signing/sealing details.
2117  ********************************************************************/
2118
/* Per-request state for the async rpc_api_pipe_req_send() call chain. */
struct rpc_api_pipe_req_state {
	struct event_context *ev;	/* Event context driving the async I/O. */
	struct rpc_pipe_client *cli;	/* The pipe the request goes out on. */
	uint8_t op_num;			/* RPC opnum of this request. */
	uint32_t call_id;		/* Call id shared by all fragments. */
	prs_struct *req_data;		/* Marshalled request data (caller-owned). */
	uint32_t req_data_sent;		/* Bytes of req_data already sent. */
	prs_struct outgoing_frag;	/* Scratch buffer for the current fragment. */
	prs_struct reply_pdu;		/* Assembled response PDU. */
};
2129
2130 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2131 {
2132         prs_mem_free(&s->outgoing_frag);
2133         prs_mem_free(&s->reply_pdu);
2134         return 0;
2135 }
2136
2137 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2138 static void rpc_api_pipe_req_done(struct async_req *subreq);
2139 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2140                                   bool *is_last_frag);
2141
struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
					struct event_context *ev,
					struct rpc_pipe_client *cli,
					uint8_t op_num,
					prs_struct *req_data)
{
	struct async_req *result, *subreq;
	struct rpc_api_pipe_req_state *state;
	NTSTATUS status;
	bool is_last_frag;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_api_pipe_req_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->op_num = op_num;
	state->req_data = req_data;
	state->req_data_sent = 0;
	state->call_id = get_rpc_call_id();

	/* A fragment must at least hold the headers plus the maximum
	   sign/seal overhead, otherwise we can never make progress. */
	if (cli->max_xmit_frag
	    < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
		/* Server is screwed up ! */
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	/* outgoing_frag is reused for every fragment we send. */
	if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
		      state, MARSHALL)) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}

	talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	if (is_last_frag) {
		/* Single-fragment request: send it and read the reply in
		   one combined step. */
		subreq = rpc_api_pipe_send(state, ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = rpc_api_pipe_req_done;
		subreq->async.priv = result;
	} else {
		/* More fragments follow: just write this one, the write
		   completion handler queues the next fragment. */
		subreq = rpc_write_send(state, ev, cli,
					prs_data_p(&state->outgoing_frag),
					prs_offset(&state->outgoing_frag));
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = rpc_api_pipe_req_write_done;
		subreq->async.priv = result;
	}
	return result;

 post_status:
	/* Deliver the error through the async completion path. */
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
2216
/****************************************************************************
 Marshall the next request fragment into state->outgoing_frag:
 RPC header + request header + the next slice of payload + sign/seal
 padding + auth footer. Sets *is_last_frag when this fragment carries
 the RPC_FLG_LAST flag. Returns the status of the auth-footer step.
****************************************************************************/

static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
				  bool *is_last_frag)
{
	RPC_HDR hdr;
	RPC_HDR_REQ hdr_req;
	uint32_t data_sent_thistime;
	uint16_t auth_len;
	uint16_t frag_len;
	uint8_t flags = 0;
	uint32_t ss_padding;
	uint32_t data_left;
	char pad[8] = { 0, };	/* zero bytes used as sign/seal padding */
	NTSTATUS status;

	/* How much request payload is still unsent? */
	data_left = prs_offset(state->req_data) - state->req_data_sent;

	/* Work out payload/fragment/auth/padding sizes for this leg. */
	data_sent_thistime = calculate_data_len_tosend(
		state->cli, data_left, &frag_len, &auth_len, &ss_padding);

	if (state->req_data_sent == 0) {
		flags = RPC_FLG_FIRST;	/* very first fragment */
	}

	if (data_sent_thistime == data_left) {
		flags |= RPC_FLG_LAST;	/* remainder fits: final fragment */
	}

	/* Rewind the outgoing buffer; each fragment is marshalled fresh. */
	if (!prs_set_offset(&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create and marshall the header and request header. */
	init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
		     auth_len);

	if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Create the rpc request RPC_HDR_REQ. The second argument is the
	 * total request size — presumably the DCE/RPC alloc hint; confirm
	 * against init_rpc_hdr_req().
	 */
	init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
			 state->op_num);

	if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
				&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy in the data, plus any ss padding. */
	if (!prs_append_some_prs_data(&state->outgoing_frag,
				      state->req_data, state->req_data_sent,
				      data_sent_thistime)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy the sign/seal padding data. */
	if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Generate any auth sign/seal and add the auth footer. */
	switch (state->cli->auth->auth_type) {
	case PIPE_AUTH_TYPE_NONE:
		status = NT_STATUS_OK;	/* nothing to append */
		break;
	case PIPE_AUTH_TYPE_NTLMSSP:
	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
						 &state->outgoing_frag);
		break;
	case PIPE_AUTH_TYPE_SCHANNEL:
		status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
						  &state->outgoing_frag);
		break;
	default:
		status = NT_STATUS_INVALID_PARAMETER;
		break;
	}

	/*
	 * Progress counters are advanced even when the auth footer failed;
	 * the callers bail out on !NT_STATUS_IS_OK so this is harmless.
	 */
	state->req_data_sent += data_sent_thistime;
	*is_last_frag = ((flags & RPC_FLG_LAST) != 0);

	return status;
}
2301
/****************************************************************************
 Write-completion step of the fragmented-request state machine: once the
 previous fragment is on the wire, marshall the next one. Non-final
 fragments are sent with rpc_write_send() and re-enter this handler; the
 final fragment goes out via rpc_api_pipe_send() so the response PDU can
 be collected by rpc_api_pipe_req_done().
****************************************************************************/

static void rpc_api_pipe_req_write_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_req_state);
	NTSTATUS status;
	bool is_last_frag;

	/* Harvest the result of the fragment write we just finished. */
	status = rpc_write_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	/* Marshall the next fragment into state->outgoing_frag. */
	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	if (is_last_frag) {
		/* Last fragment: send it and wait for the response PDU. */
		subreq = rpc_api_pipe_send(state, state->ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (async_req_nomem(subreq, req)) {
			return;
		}
		subreq->async.fn = rpc_api_pipe_req_done;
		subreq->async.priv = req;
	} else {
		/* More to go: write this fragment and loop back here. */
		subreq = rpc_write_send(state, state->ev, state->cli,
					prs_data_p(&state->outgoing_frag),
					prs_offset(&state->outgoing_frag));
		if (async_req_nomem(subreq, req)) {
			return;
		}
		subreq->async.fn = rpc_api_pipe_req_write_done;
		subreq->async.priv = req;
	}
}
2344
2345 static void rpc_api_pipe_req_done(struct async_req *subreq)
2346 {
2347         struct async_req *req = talloc_get_type_abort(
2348                 subreq->async.priv, struct async_req);
2349         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2350                 req->private_data, struct rpc_api_pipe_req_state);
2351         NTSTATUS status;
2352
2353         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2354         TALLOC_FREE(subreq);
2355         if (!NT_STATUS_IS_OK(status)) {
2356                 async_req_error(req, status);
2357                 return;
2358         }
2359         async_req_done(req);
2360 }
2361
/****************************************************************************
 Receive half of rpc_api_pipe_req_send(): hands the reply PDU over to
 the caller. On error the reply PDU is still initialized (empty), as
 the rpccli_* callers expect. On success, ownership of the reply data
 moves to mem_ctx and state->reply_pdu is reset so the state destructor
 does not free the buffer we just handed out.
****************************************************************************/

NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
			       prs_struct *reply_pdu)
{
	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_req_state);
	NTSTATUS status;

	if (async_req_is_error(req, &status)) {
		/*
		 * We always have to initialize to reply pdu, even if there is
		 * none. The rpccli_* caller routines expect this.
		 */
		prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
		return status;
	}

	/* Shallow-copy the pdu and reparent its memory to the caller. */
	*reply_pdu = state->reply_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->req_pdu from being freed in
	 * rpc_api_pipe_req_state_destructor()
	 */
	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
2389
2390 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2391                         uint8 op_num,
2392                         prs_struct *in_data,
2393                         prs_struct *out_data)
2394 {
2395         TALLOC_CTX *frame = talloc_stackframe();
2396         struct event_context *ev;
2397         struct async_req *req;
2398         NTSTATUS status = NT_STATUS_NO_MEMORY;
2399
2400         ev = event_context_init(frame);
2401         if (ev == NULL) {
2402                 goto fail;
2403         }
2404
2405         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2406         if (req == NULL) {
2407                 goto fail;
2408         }
2409
2410         while (req->state < ASYNC_REQ_DONE) {
2411                 event_loop_once(ev);
2412         }
2413
2414         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2415  fail:
2416         TALLOC_FREE(frame);
2417         return status;
2418 }
2419
#if 0
/*
 * NOTE(review): dead code, compiled out. Kept for reference only; if it
 * stays unused it should be removed rather than carried along.
 */
/****************************************************************************
 Set the handle state.
****************************************************************************/

static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
				   const char *pipe_name, uint16 device_state)
{
	bool state_set = False;
	char param[2];
	uint16 setup[2]; /* only need 2 uint16 setup parameters */
	char *rparam = NULL;
	char *rdata = NULL;
	uint32 rparam_len, rdata_len;

	if (pipe_name == NULL)
		return False;

	DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
		 cli->fnum, pipe_name, device_state));

	/* create parameters: device state */
	SSVAL(param, 0, device_state);

	/* create setup parameters. */
	setup[0] = 0x0001; 
	setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */

	/* send the data on \PIPE\ */
	if (cli_api_pipe(cli->cli, "\\PIPE\\",
	            setup, 2, 0,                /* setup, length, max */
	            param, 2, 0,                /* param, length, max */
	            NULL, 0, 1024,              /* data, length, max */
	            &rparam, &rparam_len,        /* return param, length */
	            &rdata, &rdata_len))         /* return data, length */
	{
		DEBUG(5, ("Set Handle state: return OK\n"));
		state_set = True;
	}

	SAFE_FREE(rparam);
	SAFE_FREE(rdata);

	return state_set;
}
#endif
2466
2467 /****************************************************************************
2468  Check the rpc bind acknowledge response.
2469 ****************************************************************************/
2470
2471 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2472 {
2473         if ( hdr_ba->addr.len == 0) {
2474                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2475         }
2476
2477         /* check the transfer syntax */
2478         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2479              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2480                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2481                 return False;
2482         }
2483
2484         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2485                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2486                           hdr_ba->res.num_results, hdr_ba->res.reason));
2487         }
2488
2489         DEBUG(5,("check_bind_response: accepted!\n"));
2490         return True;
2491 }
2492
2493 /*******************************************************************
2494  Creates a DCE/RPC bind authentication response.
2495  This is the packet that is sent back to the server once we
2496  have received a BIND-ACK, to finish the third leg of
2497  the authentication handshake.
2498  ********************************************************************/
2499
2500 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2501                                 uint32 rpc_call_id,
2502                                 enum pipe_auth_type auth_type,
2503                                 enum pipe_auth_level auth_level,
2504                                 DATA_BLOB *pauth_blob,
2505                                 prs_struct *rpc_out)
2506 {
2507         RPC_HDR hdr;
2508         RPC_HDR_AUTH hdr_auth;
2509         uint32 pad = 0;
2510
2511         /* Create the request RPC_HDR */
2512         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2513                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2514                      pauth_blob->length );
2515
2516         /* Marshall it. */
2517         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2518                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2519                 return NT_STATUS_NO_MEMORY;
2520         }
2521
2522         /*
2523                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2524                 about padding - shouldn't this pad to length 8 ? JRA.
2525         */
2526
2527         /* 4 bytes padding. */
2528         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2529                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2530                 return NT_STATUS_NO_MEMORY;
2531         }
2532
2533         /* Create the request RPC_HDR_AUTHA */
2534         init_rpc_hdr_auth(&hdr_auth,
2535                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2536                         auth_level, 0, 1);
2537
2538         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2539                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2540                 return NT_STATUS_NO_MEMORY;
2541         }
2542
2543         /*
2544          * Append the auth data to the outgoing buffer.
2545          */
2546
2547         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2548                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2549                 return NT_STATUS_NO_MEMORY;
2550         }
2551
2552         return NT_STATUS_OK;
2553 }
2554
2555 /*******************************************************************
2556  Creates a DCE/RPC bind alter context authentication request which
2557  may contain a spnego auth blobl
2558  ********************************************************************/
2559
2560 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2561                                         const RPC_IFACE *abstract,
2562                                         const RPC_IFACE *transfer,
2563                                         enum pipe_auth_level auth_level,
2564                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2565                                         prs_struct *rpc_out)
2566 {
2567         RPC_HDR_AUTH hdr_auth;
2568         prs_struct auth_info;
2569         NTSTATUS ret = NT_STATUS_OK;
2570
2571         ZERO_STRUCT(hdr_auth);
2572         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2573                 return NT_STATUS_NO_MEMORY;
2574
2575         /* We may change the pad length before marshalling. */
2576         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2577
2578         if (pauth_blob->length) {
2579                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2580                         prs_mem_free(&auth_info);
2581                         return NT_STATUS_NO_MEMORY;
2582                 }
2583         }
2584
2585         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2586                                                 rpc_out, 
2587                                                 rpc_call_id,
2588                                                 abstract,
2589                                                 transfer,
2590                                                 &hdr_auth,
2591                                                 &auth_info);
2592         prs_mem_free(&auth_info);
2593         return ret;
2594 }
2595
2596 /****************************************************************************
2597  Do an rpc bind.
2598 ****************************************************************************/
2599
2600 struct rpc_pipe_bind_state {
2601         struct event_context *ev;
2602         struct rpc_pipe_client *cli;
2603         prs_struct rpc_out;
2604         uint32_t rpc_call_id;
2605 };
2606
2607 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2608 {
2609         prs_mem_free(&state->rpc_out);
2610         return 0;
2611 }
2612
/* Forward declarations for the multi-leg async bind state machine. */
static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
					   struct rpc_pipe_bind_state *state,
					   struct rpc_hdr_info *phdr,
					   prs_struct *reply_pdu);
static void rpc_bind_auth3_write_done(struct async_req *subreq);
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
						    struct rpc_pipe_bind_state *state,
						    struct rpc_hdr_info *phdr,
						    prs_struct *reply_pdu);
static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2624
/****************************************************************************
 Kick off an async rpc bind: takes ownership of the auth context,
 marshalls the initial BIND PDU and sends it, expecting a BIND-ACK.
 Continuation is rpc_pipe_bind_step_one_done().
****************************************************************************/

struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
				     struct event_context *ev,
				     struct rpc_pipe_client *cli,
				     struct cli_pipe_auth_data *auth)
{
	struct async_req *result, *subreq;
	struct rpc_pipe_bind_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_pipe_bind_state)) {
		return NULL;
	}

	DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
		rpccli_pipe_txt(debug_ctx(), cli),
		(unsigned int)auth->auth_type,
		(unsigned int)auth->auth_level ));

	state->ev = ev;
	state->cli = cli;
	state->rpc_call_id = get_rpc_call_id();

	prs_init_empty(&state->rpc_out, state, MARSHALL);
	talloc_set_destructor(state, rpc_pipe_bind_state_destructor);

	/* The pipe takes ownership of the auth data from here on. */
	cli->auth = talloc_move(cli, &auth);

	/* Marshall the outgoing data. */
	status = create_rpc_bind_req(cli, &state->rpc_out,
				     state->rpc_call_id,
				     &cli->abstract_syntax,
				     &cli->transfer_syntax,
				     cli->auth->auth_type,
				     cli->auth->auth_level);

	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	/* Send the bind and wait for the BIND-ACK response PDU. */
	subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
				   RPC_BINDACK);
	if (subreq == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}
	subreq->async.fn = rpc_pipe_bind_step_one_done;
	subreq->async.priv = result;
	return result;

 post_status:
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
2682
/****************************************************************************
 Handle the BIND-ACK: unmarshall and validate it, record the negotiated
 fragment sizes, then either finish (no auth / schannel) or start the
 third/fourth authentication leg (NTLMSSP AUTH3, SPNEGO alter context).
****************************************************************************/

static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_ba_info hdr_ba;
	NTSTATUS status;

	/* reply_pdu is parented to talloc_tos(); freed with the frame. */
	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  nt_errstr(status)));
		async_req_error(req, status);
		return;
	}

	/* Unmarshall the RPC header */
	if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
			  "RPC_HDR_BA.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
		DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Remember the fragment sizes the server negotiated. */
	state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
	state->cli->max_recv_frag = hdr_ba.bba.max_rsize;

	/*
	 * For authenticated binds we may need to do 3 or 4 leg binds.
	 */

	switch(state->cli->auth->auth_type) {

	case PIPE_AUTH_TYPE_NONE:
	case PIPE_AUTH_TYPE_SCHANNEL:
		/* Bind complete. */
		async_req_done(req);
		break;

	case PIPE_AUTH_TYPE_NTLMSSP:
		/* Need to send AUTH3 packet - no reply. */
		status = rpc_finish_auth3_bind_send(req, state, &hdr,
						    &reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_error(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		/* Need to send alter context request and reply. */
		status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
							     &reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_error(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_KRB5:
		/* KRB5 continuation not implemented - fall through */

	default:
		DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
			 (unsigned int)state->cli->auth->auth_type));
		async_req_error(req, NT_STATUS_INTERNAL_ERROR);
	}
}
2766
2767 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2768                                            struct rpc_pipe_bind_state *state,
2769                                            struct rpc_hdr_info *phdr,
2770                                            prs_struct *reply_pdu)
2771 {
2772         DATA_BLOB server_response = data_blob_null;
2773         DATA_BLOB client_reply = data_blob_null;
2774         struct rpc_hdr_auth_info hdr_auth;
2775         struct async_req *subreq;
2776         NTSTATUS status;
2777
2778         if ((phdr->auth_len == 0)
2779             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2780                 return NT_STATUS_INVALID_PARAMETER;
2781         }
2782
2783         if (!prs_set_offset(
2784                     reply_pdu,
2785                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2786                 return NT_STATUS_INVALID_PARAMETER;
2787         }
2788
2789         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2790                 return NT_STATUS_INVALID_PARAMETER;
2791         }
2792
2793         /* TODO - check auth_type/auth_level match. */
2794
2795         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2796         prs_copy_data_out((char *)server_response.data, reply_pdu,
2797                           phdr->auth_len);
2798
2799         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2800                                 server_response, &client_reply);
2801
2802         if (!NT_STATUS_IS_OK(status)) {
2803                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2804                           "blob failed: %s.\n", nt_errstr(status)));
2805                 return status;
2806         }
2807
2808         prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2809
2810         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2811                                        state->cli->auth->auth_type,
2812                                        state->cli->auth->auth_level,
2813                                        &client_reply, &state->rpc_out);
2814         data_blob_free(&client_reply);
2815
2816         if (!NT_STATUS_IS_OK(status)) {
2817                 return status;
2818         }
2819
2820         subreq = rpc_write_send(state, state->ev, state->cli,
2821                                 prs_data_p(&state->rpc_out),
2822                                 prs_offset(&state->rpc_out));
2823         if (subreq == NULL) {
2824                 return NT_STATUS_NO_MEMORY;
2825         }
2826         subreq->async.fn = rpc_bind_auth3_write_done;
2827         subreq->async.priv = req;
2828         return NT_STATUS_OK;
2829 }
2830
2831 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2832 {
2833         struct async_req *req = talloc_get_type_abort(
2834                 subreq->async.priv, struct async_req);
2835         NTSTATUS status;
2836
2837         status = rpc_write_recv(subreq);
2838         TALLOC_FREE(subreq);
2839         if (!NT_STATUS_IS_OK(status)) {
2840                 async_req_error(req, status);
2841                 return;
2842         }
2843         async_req_done(req);
2844 }
2845
/****************************************************************************
 Fourth-leg setup for a SPNEGO/NTLMSSP bind: unwrap the SPNEGO challenge
 from the BIND-ACK auth trailer, feed the inner NTLMSSP token through
 ntlmssp_update(), SPNEGO-wrap the reply and send it as an ALTER_CONTEXT
 request. Continuation is rpc_bind_ntlmssp_api_done().
****************************************************************************/

static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
						    struct rpc_pipe_bind_state *state,
						    struct rpc_hdr_info *phdr,
						    prs_struct *reply_pdu)
{
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB server_ntlm_response = data_blob_null;
	DATA_BLOB client_reply = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	RPC_HDR_AUTH hdr_auth;
	struct async_req *subreq;
	NTSTATUS status;

	/* Without auth data there is no challenge to respond to. */
	if ((phdr->auth_len == 0)
	    || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Process the returned NTLMSSP blob first. */
	if (!prs_set_offset(
		    reply_pdu,
		    phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	server_spnego_response = data_blob(NULL, phdr->auth_len);
	prs_copy_data_out((char *)server_spnego_response.data,
			  reply_pdu, phdr->auth_len);

	/*
	 * The server might give us back two challenges - tmp_blob is for the
	 * second.
	 */
	if (!spnego_parse_challenge(server_spnego_response,
				    &server_ntlm_response, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&server_ntlm_response);
		data_blob_free(&tmp_blob);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* We're finished with the server spnego response and the tmp_blob. */
	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
				server_ntlm_response, &client_reply);

	/* Finished with the server_ntlm response */
	data_blob_free(&server_ntlm_response);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
			  "using server blob failed.\n"));
		data_blob_free(&client_reply);
		return status;
	}

	/* SPNEGO wrap the client reply. */
	tmp_blob = spnego_gen_auth(client_reply);
	data_blob_free(&client_reply);
	client_reply = tmp_blob;
	tmp_blob = data_blob_null;

	/* Now prepare the alter context pdu. */
	prs_init_empty(&state->rpc_out, state, MARSHALL);

	status = create_rpc_alter_context(state->rpc_call_id,
					  &state->cli->abstract_syntax,
					  &state->cli->transfer_syntax,
					  state->cli->auth->auth_level,
					  &client_reply,
					  &state->rpc_out);
	data_blob_free(&client_reply);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Send the alter context request and wait for its response. */
	subreq = rpc_api_pipe_send(state, state->ev, state->cli,
				   &state->rpc_out, RPC_ALTCONTRESP);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	subreq->async.fn = rpc_bind_ntlmssp_api_done;
	subreq->async.priv = req;
	return NT_STATUS_OK;
}
2938
/****************************************************************************
 Final leg of the SPNEGO/NTLMSSP bind: parse the ALTER_CONTEXT response,
 pull the SPNEGO blob out of its auth trailer and verify the server's
 auth response. On success the bind is complete.
****************************************************************************/

static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_auth_info hdr_auth;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	/* Get the auth blob from the reply. */
	if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
			  "unmarshall RPC_HDR.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Seek to the auth trailer at the tail of the response PDU. */
	if (!prs_set_offset(
		    &reply_pdu,
		    hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	server_spnego_response = data_blob(NULL, hdr.auth_len);
	prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
			  hdr.auth_len);

	/* Check we got a valid auth response. */
	if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
					OID_NTLMSSP, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&tmp_blob);
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
		 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
	async_req_done(req);
}
2999
/*
 * Collect the result of an async rpc_pipe_bind_send() request.
 * Must only be called once req->state has reached ASYNC_REQ_DONE.
 */
NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
{
	return async_req_simple_recv(req);
}
3004
3005 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
3006                        struct cli_pipe_auth_data *auth)
3007 {
3008         TALLOC_CTX *frame = talloc_stackframe();
3009         struct event_context *ev;
3010         struct async_req *req;
3011         NTSTATUS status = NT_STATUS_NO_MEMORY;
3012
3013         ev = event_context_init(frame);
3014         if (ev == NULL) {
3015                 goto fail;
3016         }
3017
3018         req = rpc_pipe_bind_send(frame, ev, cli, auth);
3019         if (req == NULL) {
3020                 goto fail;
3021         }
3022
3023         while (req->state < ASYNC_REQ_DONE) {
3024                 event_loop_once(ev);
3025         }
3026
3027         status = rpc_pipe_bind_recv(req);
3028  fail:
3029         TALLOC_FREE(frame);
3030         return status;
3031 }
3032
3033 unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli,
3034                                 unsigned int timeout)
3035 {
3036         return cli_set_timeout(cli->trans.np.cli, timeout);
3037 }
3038
3039 bool rpccli_get_pwd_hash(struct rpc_pipe_client *cli, uint8_t nt_hash[16])
3040 {
3041         if ((cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
3042             || (cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
3043                 memcpy(nt_hash, cli->auth->a_u.ntlmssp_state->nt_hash, 16);
3044                 return true;
3045         }
3046
3047         if (cli->transport_type == NCACN_NP) {
3048                 E_md4hash(cli->trans.np.cli->pwd.password, nt_hash);
3049                 return true;
3050         }
3051
3052         return false;
3053 }
3054
3055 struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
3056 {
3057         if (p->transport_type == NCACN_NP) {
3058                 return p->trans.np.cli;
3059         }
3060         return NULL;
3061 }
3062
3063 static int rpc_pipe_destructor(struct rpc_pipe_client *p)
3064 {
3065         if (p->transport_type == NCACN_NP) {
3066                 bool ret;
3067                 ret = cli_close(p->trans.np.cli, p->trans.np.fnum);
3068                 if (!ret) {
3069                         DEBUG(1, ("rpc_pipe_destructor: cli_close failed on "
3070                                   "pipe %s. Error was %s\n",
3071                                   rpccli_pipe_txt(debug_ctx(), p),
3072                                   cli_errstr(p->trans.np.cli)));
3073                 }
3074
3075                 DEBUG(10, ("rpc_pipe_destructor: closed %s\n",
3076                            rpccli_pipe_txt(debug_ctx(), p)));
3077
3078                 DLIST_REMOVE(p->trans.np.cli->pipe_list, p);
3079                 return ret ? -1 : 0;
3080         }
3081
3082         return -1;
3083 }
3084
3085 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
3086                                struct cli_pipe_auth_data **presult)
3087 {
3088         struct cli_pipe_auth_data *result;
3089
3090         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3091         if (result == NULL) {
3092                 return NT_STATUS_NO_MEMORY;
3093         }
3094
3095         result->auth_type = PIPE_AUTH_TYPE_NONE;
3096         result->auth_level = PIPE_AUTH_LEVEL_NONE;
3097
3098         result->user_name = talloc_strdup(result, "");
3099         result->domain = talloc_strdup(result, "");
3100         if ((result->user_name == NULL) || (result->domain == NULL)) {
3101                 TALLOC_FREE(result);
3102                 return NT_STATUS_NO_MEMORY;
3103         }
3104
3105         *presult = result;
3106         return NT_STATUS_OK;
3107 }
3108
/*
 * Talloc destructor for NTLMSSP auth data: tear down the ntlmssp state
 * when the cli_pipe_auth_data is freed. Always returns 0 so the free
 * proceeds.
 */
static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
{
	ntlmssp_end(&auth->a_u.ntlmssp_state);
	return 0;
}
3114
/*
 * Build cli_pipe_auth_data for an NTLMSSP (or SPNEGO-wrapped NTLMSSP)
 * bind with the given credentials.
 *
 * auth_type selects plain vs SPNEGO NTLMSSP; auth_level selects
 * none/sign/seal. On success *presult receives the auth blob,
 * talloc'ed off mem_ctx. On failure everything is cleaned up and an
 * error is returned.
 */
NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
				  enum pipe_auth_type auth_type,
				  enum pipe_auth_level auth_level,
				  const char *domain,
				  const char *username,
				  const char *password,
				  struct cli_pipe_auth_data **presult)
{
	struct cli_pipe_auth_data *result;
	NTSTATUS status;

	result = talloc(mem_ctx, struct cli_pipe_auth_data);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->auth_type = auth_type;
	result->auth_level = auth_level;

	result->user_name = talloc_strdup(result, username);
	result->domain = talloc_strdup(result, domain);
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	/*
	 * Only install the destructor once ntlmssp_state exists;
	 * it calls ntlmssp_end() on that state.
	 */
	talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);

	status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	/*
	 * Turn off sign+seal to allow selected auth level to turn it back on.
	 */
	result->a_u.ntlmssp_state->neg_flags &=
		~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);

	if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
		result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
	} else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
		/* Privacy implies integrity: negotiate both seal and sign. */
		result->a_u.ntlmssp_state->neg_flags
			|= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
	}

	*presult = result;
	return NT_STATUS_OK;

 fail:
	TALLOC_FREE(result);
	return status;
}
3183
3184 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3185                                    enum pipe_auth_level auth_level,
3186                                    const uint8_t sess_key[16],
3187                                    struct cli_pipe_auth_data **presult)
3188 {
3189         struct cli_pipe_auth_data *result;
3190
3191         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3192         if (result == NULL) {
3193                 return NT_STATUS_NO_MEMORY;
3194         }
3195
3196         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3197         result->auth_level = auth_level;
3198
3199         result->user_name = talloc_strdup(result, "");
3200         result->domain = talloc_strdup(result, domain);
3201         if ((result->user_name == NULL) || (result->domain == NULL)) {
3202                 goto fail;
3203         }
3204
3205         result->a_u.schannel_auth = talloc(result,
3206                                            struct schannel_auth_struct);
3207         if (result->a_u.schannel_auth == NULL) {
3208                 goto fail;
3209         }
3210
3211         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3212                sizeof(result->a_u.schannel_auth->sess_key));
3213         result->a_u.schannel_auth->seq_num = 0;
3214
3215         *presult = result;
3216         return NT_STATUS_OK;
3217
3218  fail:
3219         TALLOC_FREE(result);
3220         return NT_STATUS_NO_MEMORY;
3221 }
3222
#ifdef HAVE_KRB5
/*
 * Talloc destructor for Kerberos auth data: free the session key blob
 * when the kerberos_auth_struct goes away. Returns 0 so the free
 * proceeds.
 */
static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
{
	data_blob_free(&auth->session_key);
	return 0;
}
#endif
3230
/*
 * Build cli_pipe_auth_data for a Kerberos (KRB5) bind.
 *
 * If both username and password are given, kinit as that user first;
 * otherwise the existing credential cache is used. service_princ is
 * the principal the service ticket will be requested for.
 * Returns NT_STATUS_NOT_SUPPORTED when built without krb5 support.
 */
NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
				   enum pipe_auth_level auth_level,
				   const char *service_princ,
				   const char *username,
				   const char *password,
				   struct cli_pipe_auth_data **presult)
{
#ifdef HAVE_KRB5
	struct cli_pipe_auth_data *result;

	if ((username != NULL) && (password != NULL)) {
		/* Obtain a TGT for the given user up front. */
		int ret = kerberos_kinit_password(username, password, 0, NULL);
		if (ret != 0) {
			return NT_STATUS_ACCESS_DENIED;
		}
	}

	result = talloc(mem_ctx, struct cli_pipe_auth_data);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->auth_type = PIPE_AUTH_TYPE_KRB5;
	result->auth_level = auth_level;

	/*
	 * Username / domain need fixing!
	 */
	result->user_name = talloc_strdup(result, "");
	result->domain = talloc_strdup(result, "");
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		goto fail;
	}

	result->a_u.kerberos_auth = TALLOC_ZERO_P(
		result, struct kerberos_auth_struct);
	if (result->a_u.kerberos_auth == NULL) {
		goto fail;
	}
	/* Frees the session key blob when the auth struct goes away. */
	talloc_set_destructor(result->a_u.kerberos_auth,
			      cli_auth_kerberos_data_destructor);

	result->a_u.kerberos_auth->service_principal = talloc_strdup(
		result, service_princ);
	if (result->a_u.kerberos_auth->service_principal == NULL) {
		goto fail;
	}

	*presult = result;
	return NT_STATUS_OK;

 fail:
	TALLOC_FREE(result);
	return NT_STATUS_NO_MEMORY;
#else
	return NT_STATUS_NOT_SUPPORTED;
#endif
}
3289
/*
 * Talloc destructor for socket-based (tcp / unix stream) rpc pipes:
 * close the socket fd when the rpc_pipe_client is freed.
 */
static int rpc_pipe_sock_destructor(struct rpc_pipe_client *p)
{
	close(p->trans.sock.fd);
	return 0;
}
3295
3296 /**
3297  * Create an rpc pipe client struct, connecting to a tcp port.
3298  */
3299 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3300                                        uint16_t port,
3301                                        const struct ndr_syntax_id *abstract_syntax,
3302                                        struct rpc_pipe_client **presult)
3303 {
3304         struct rpc_pipe_client *result;
3305         struct sockaddr_storage addr;
3306         NTSTATUS status;
3307
3308         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3309         if (result == NULL) {
3310                 return NT_STATUS_NO_MEMORY;
3311         }
3312
3313         result->transport_type = NCACN_IP_TCP;
3314
3315         result->abstract_syntax = *abstract_syntax;
3316         result->transfer_syntax = ndr_transfer_syntax;
3317         result->dispatch = cli_do_rpc_ndr;
3318
3319         result->desthost = talloc_strdup(result, host);
3320         result->srv_name_slash = talloc_asprintf_strupper_m(
3321                 result, "\\\\%s", result->desthost);
3322         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3323                 status = NT_STATUS_NO_MEMORY;
3324                 goto fail;
3325         }
3326
3327         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3328         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3329
3330         if (!resolve_name(host, &addr, 0)) {
3331                 status = NT_STATUS_NOT_FOUND;
3332                 goto fail;
3333         }
3334
3335         status = open_socket_out(&addr, port, 60, &result->trans.sock.fd);
3336         if (!NT_STATUS_IS_OK(status)) {
3337                 goto fail;
3338         }
3339
3340         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3341
3342         *presult = result;
3343         return NT_STATUS_OK;
3344
3345  fail:
3346         TALLOC_FREE(result);
3347         return status;
3348 }
3349
3350 /**
3351  * Determine the tcp port on which a dcerpc interface is listening
3352  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3353  * target host.
3354  */
3355 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3356                                       const struct ndr_syntax_id *abstract_syntax,
3357                                       uint16_t *pport)
3358 {
3359         NTSTATUS status;
3360         struct rpc_pipe_client *epm_pipe = NULL;
3361         struct cli_pipe_auth_data *auth = NULL;
3362         struct dcerpc_binding *map_binding = NULL;
3363         struct dcerpc_binding *res_binding = NULL;
3364         struct epm_twr_t *map_tower = NULL;
3365         struct epm_twr_t *res_towers = NULL;
3366         struct policy_handle *entry_handle = NULL;
3367         uint32_t num_towers = 0;
3368         uint32_t max_towers = 1;
3369         struct epm_twr_p_t towers;
3370         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3371
3372         if (pport == NULL) {
3373                 status = NT_STATUS_INVALID_PARAMETER;
3374                 goto done;
3375         }
3376
3377         /* open the connection to the endpoint mapper */
3378         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3379                                         &ndr_table_epmapper.syntax_id,
3380                                         &epm_pipe);
3381
3382         if (!NT_STATUS_IS_OK(status)) {
3383                 goto done;
3384         }
3385
3386         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3387         if (!NT_STATUS_IS_OK(status)) {
3388                 goto done;
3389         }
3390
3391         status = rpc_pipe_bind(epm_pipe, auth);
3392         if (!NT_STATUS_IS_OK(status)) {
3393                 goto done;
3394         }
3395
3396         /* create tower for asking the epmapper */
3397
3398         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3399         if (map_binding == NULL) {
3400                 status = NT_STATUS_NO_MEMORY;
3401                 goto done;
3402         }
3403
3404         map_binding->transport = NCACN_IP_TCP;
3405         map_binding->object = *abstract_syntax;
3406         map_binding->host = host; /* needed? */
3407         map_binding->endpoint = "0"; /* correct? needed? */
3408
3409         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3410         if (map_tower == NULL) {
3411                 status = NT_STATUS_NO_MEMORY;
3412                 goto done;
3413         }
3414
3415         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3416                                             &(map_tower->tower));
3417         if (!NT_STATUS_IS_OK(status)) {
3418                 goto done;
3419         }
3420
3421         /* allocate further parameters for the epm_Map call */
3422
3423         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3424         if (res_towers == NULL) {
3425                 status = NT_STATUS_NO_MEMORY;
3426                 goto done;
3427         }
3428         towers.twr = res_towers;
3429
3430         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3431         if (entry_handle == NULL) {
3432                 status = NT_STATUS_NO_MEMORY;
3433                 goto done;
3434         }
3435
3436         /* ask the endpoint mapper for the port */
3437
3438         status = rpccli_epm_Map(epm_pipe,
3439                                 tmp_ctx,
3440                                 CONST_DISCARD(struct GUID *,
3441                                               &(abstract_syntax->uuid)),
3442                                 map_tower,
3443                                 entry_handle,
3444                                 max_towers,
3445                                 &num_towers,
3446                                 &towers);
3447
3448         if (!NT_STATUS_IS_OK(status)) {
3449                 goto done;
3450         }
3451
3452         if (num_towers != 1) {
3453                 status = NT_STATUS_UNSUCCESSFUL;
3454                 goto done;
3455         }
3456
3457         /* extract the port from the answer */
3458
3459         status = dcerpc_binding_from_tower(tmp_ctx,
3460                                            &(towers.twr->tower),
3461                                            &res_binding);
3462         if (!NT_STATUS_IS_OK(status)) {
3463                 goto done;
3464         }
3465
3466         /* are further checks here necessary? */
3467         if (res_binding->transport != NCACN_IP_TCP) {
3468                 status = NT_STATUS_UNSUCCESSFUL;
3469                 goto done;
3470         }
3471
3472         *pport = (uint16_t)atoi(res_binding->endpoint);
3473
3474 done:
3475         TALLOC_FREE(tmp_ctx);
3476         return status;
3477 }
3478
3479 /**
3480  * Create a rpc pipe client struct, connecting to a host via tcp.
3481  * The port is determined by asking the endpoint mapper on the given
3482  * host.
3483  */
3484 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3485                            const struct ndr_syntax_id *abstract_syntax,
3486                            struct rpc_pipe_client **presult)
3487 {
3488         NTSTATUS status;
3489         uint16_t port = 0;
3490
3491         *presult = NULL;
3492
3493         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3494         if (!NT_STATUS_IS_OK(status)) {
3495                 goto done;
3496         }
3497
3498         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3499                                         abstract_syntax, presult);
3500
3501 done:
3502         return status;
3503 }
3504
3505 /********************************************************************
3506  Create a rpc pipe client struct, connecting to a unix domain socket
3507  ********************************************************************/
3508 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3509                                const struct ndr_syntax_id *abstract_syntax,
3510                                struct rpc_pipe_client **presult)
3511 {
3512         struct rpc_pipe_client *result;
3513         struct sockaddr_un addr;
3514         NTSTATUS status;
3515
3516         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3517         if (result == NULL) {
3518                 return NT_STATUS_NO_MEMORY;
3519         }
3520
3521         result->transport_type = NCACN_UNIX_STREAM;
3522
3523         result->abstract_syntax = *abstract_syntax;
3524         result->transfer_syntax = ndr_transfer_syntax;
3525         result->dispatch = cli_do_rpc_ndr;
3526
3527         result->desthost = talloc_get_myname(result);
3528         result->srv_name_slash = talloc_asprintf_strupper_m(
3529                 result, "\\\\%s", result->desthost);
3530         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3531                 status = NT_STATUS_NO_MEMORY;
3532                 goto fail;
3533         }
3534
3535         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3536         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3537
3538         result->trans.sock.fd = socket(AF_UNIX, SOCK_STREAM, 0);
3539         if (result->trans.sock.fd == -1) {
3540                 status = map_nt_error_from_unix(errno);
3541                 goto fail;
3542         }
3543
3544         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3545
3546         ZERO_STRUCT(addr);
3547         addr.sun_family = AF_UNIX;
3548         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3549
3550         if (sys_connect(result->trans.sock.fd,
3551                         (struct sockaddr *)&addr) == -1) {
3552                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3553                           strerror(errno)));
3554                 close(result->trans.sock.fd);
3555                 return map_nt_error_from_unix(errno);
3556         }
3557
3558         *presult = result;
3559         return NT_STATUS_OK;
3560
3561  fail:
3562         TALLOC_FREE(result);
3563         return status;
3564 }
3565
3566
3567 /****************************************************************************
3568  Open a named pipe over SMB to a remote server.
3569  *
3570  * CAVEAT CALLER OF THIS FUNCTION:
3571  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3572  *    so be sure that this function is called AFTER any structure (vs pointer)
3573  *    assignment of the cli.  In particular, libsmbclient does structure
3574  *    assignments of cli, which invalidates the data in the returned
3575  *    rpc_pipe_client if this function is called before the structure assignment
3576  *    of cli.
3577  * 
3578  ****************************************************************************/
3579
/*
 * Open the named pipe for the given interface over the SMB connection
 * 'cli'. On success the new rpc_pipe_client is linked into
 * cli->pipe_list and its destructor closes the pipe and unlinks it
 * again. The result is talloc'ed off the NULL context; the caller
 * owns it.
 */
static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
				 const struct ndr_syntax_id *abstract_syntax,
				 struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *result;
	int fnum;

	/* sanity check to protect against crashes */

	if ( !cli ) {
		return NT_STATUS_INVALID_HANDLE;
	}

	result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->transport_type = NCACN_NP;

	/* Map the interface uuid to a \PIPE\xxx name. */
	result->trans.np.pipe_name = cli_get_pipe_name_from_iface(
		result, abstract_syntax);
	if (result->trans.np.pipe_name == NULL) {
		DEBUG(1, ("Could not find pipe for interface\n"));
		TALLOC_FREE(result);
		return NT_STATUS_INVALID_PARAMETER;
	}

	result->trans.np.cli = cli;
	result->abstract_syntax = *abstract_syntax;
	result->transfer_syntax = ndr_transfer_syntax;
	result->dispatch = cli_do_rpc_ndr;
	result->desthost = talloc_strdup(result, cli->desthost);
	result->srv_name_slash = talloc_asprintf_strupper_m(
		result, "\\\\%s", result->desthost);

	result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
	result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;

	if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
		TALLOC_FREE(result);
		return NT_STATUS_NO_MEMORY;
	}

	/* Actually open the pipe on the server. */
	fnum = cli_nt_create(cli, result->trans.np.pipe_name,
			     DESIRED_ACCESS_PIPE);
	if (fnum == -1) {
		DEBUG(3,("rpc_pipe_open_np: cli_nt_create failed on pipe %s "
			 "to machine %s.  Error was %s\n",
			 result->trans.np.pipe_name, cli->desthost,
			 cli_errstr(cli)));
		TALLOC_FREE(result);
		return cli_get_nt_error(cli);
	}

	result->trans.np.fnum = fnum;

	/* Destructor undoes both the open and the list insertion. */
	DLIST_ADD(cli->pipe_list, result);
	talloc_set_destructor(result, rpc_pipe_destructor);

	*presult = result;
	return NT_STATUS_OK;
}
3643
3644 /****************************************************************************
3645  Open a pipe to a remote server.
3646  ****************************************************************************/
3647
3648 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3649                                   const struct ndr_syntax_id *interface,
3650                                   struct rpc_pipe_client **presult)
3651 {
3652         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3653                 /*
3654                  * We should have a better way to figure out this drsuapi
3655                  * speciality...
3656                  */
3657                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3658                                          presult);
3659         }
3660
3661         return rpc_pipe_open_np(cli, interface, presult);
3662 }
3663
3664 /****************************************************************************
3665  Open a named pipe to an SMB server and bind anonymously.
3666  ****************************************************************************/
3667
3668 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3669                                   const struct ndr_syntax_id *interface,
3670                                   struct rpc_pipe_client **presult)
3671 {
3672         struct rpc_pipe_client *result;
3673         struct cli_pipe_auth_data *auth;
3674         NTSTATUS status;
3675
3676         status = cli_rpc_pipe_open(cli, interface, &result);
3677         if (!NT_STATUS_IS_OK(status)) {
3678                 return status;
3679         }
3680
3681         status = rpccli_anon_bind_data(result, &auth);
3682         if (!NT_STATUS_IS_OK(status)) {
3683                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3684                           nt_errstr(status)));
3685                 TALLOC_FREE(result);
3686                 return status;
3687         }
3688
3689         /*
3690          * This is a bit of an abstraction violation due to the fact that an
3691          * anonymous bind on an authenticated SMB inherits the user/domain
3692          * from the enclosing SMB creds
3693          */
3694
3695         TALLOC_FREE(auth->user_name);
3696         TALLOC_FREE(auth->domain);
3697
3698         auth->user_name = talloc_strdup(auth, cli->user_name);
3699         auth->domain = talloc_strdup(auth, cli->domain);
3700         auth->user_session_key = data_blob_talloc(auth,
3701                 cli->user_session_key.data,
3702                 cli->user_session_key.length);
3703
3704         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3705                 TALLOC_FREE(result);
3706                 return NT_STATUS_NO_MEMORY;
3707         }
3708
3709         status = rpc_pipe_bind(result, auth);
3710         if (!NT_STATUS_IS_OK(status)) {
3711                 int lvl = 0;
3712                 if (ndr_syntax_id_equal(interface,
3713                                         &ndr_table_dssetup.syntax_id)) {
3714                         /* non AD domains just don't have this pipe, avoid
3715                          * level 0 statement in that case - gd */
3716                         lvl = 3;
3717                 }
3718                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3719                             "%s failed with error %s\n",
3720                             cli_get_pipe_name_from_iface(debug_ctx(),
3721                                                          interface),
3722                             nt_errstr(status) ));
3723                 TALLOC_FREE(result);
3724                 return status;
3725         }
3726
3727         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3728                   "%s and bound anonymously.\n", result->trans.np.pipe_name,
3729                   cli->desthost ));
3730
3731         *presult = result;
3732         return NT_STATUS_OK;
3733 }
3734
3735 /****************************************************************************
3736  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3737  ****************************************************************************/
3738
3739 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3740                                                    const struct ndr_syntax_id *interface,
3741                                                    enum pipe_auth_type auth_type,
3742                                                    enum pipe_auth_level auth_level,
3743                                                    const char *domain,
3744                                                    const char *username,
3745                                                    const char *password,
3746                                                    struct rpc_pipe_client **presult)
3747 {
3748         struct rpc_pipe_client *result;
3749         struct cli_pipe_auth_data *auth;
3750         NTSTATUS status;
3751
3752         status = cli_rpc_pipe_open(cli, interface, &result);
3753         if (!NT_STATUS_IS_OK(status)) {
3754                 return status;
3755         }
3756
3757         status = rpccli_ntlmssp_bind_data(
3758                 result, auth_type, auth_level, domain, username,
3759                 cli->pwd.null_pwd ? NULL : password, &auth);
3760         if (!NT_STATUS_IS_OK(status)) {
3761                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3762                           nt_errstr(status)));
3763                 goto err;
3764         }
3765
3766         status = rpc_pipe_bind(result, auth);
3767         if (!NT_STATUS_IS_OK(status)) {
3768                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3769                         nt_errstr(status) ));
3770                 goto err;
3771         }
3772
3773         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3774                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3775                 result->trans.np.pipe_name, cli->desthost,
3776                 domain, username ));
3777
3778         *presult = result;
3779         return NT_STATUS_OK;
3780
3781   err:
3782
3783         TALLOC_FREE(result);
3784         return status;
3785 }
3786
3787 /****************************************************************************
3788  External interface.
3789  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3790  ****************************************************************************/
3791
3792 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3793                                    const struct ndr_syntax_id *interface,
3794                                    enum pipe_auth_level auth_level,
3795                                    const char *domain,
3796                                    const char *username,
3797                                    const char *password,
3798                                    struct rpc_pipe_client **presult)
3799 {
3800         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3801                                                 interface,
3802                                                 PIPE_AUTH_TYPE_NTLMSSP,
3803                                                 auth_level,
3804                                                 domain,
3805                                                 username,
3806                                                 password,
3807                                                 presult);
3808 }
3809
3810 /****************************************************************************
3811  External interface.
3812  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3813  ****************************************************************************/
3814
3815 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3816                                           const struct ndr_syntax_id *interface,
3817                                           enum pipe_auth_level auth_level,
3818                                           const char *domain,
3819                                           const char *username,
3820                                           const char *password,
3821                                           struct rpc_pipe_client **presult)
3822 {
3823         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3824                                                 interface,
3825                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3826                                                 auth_level,
3827                                                 domain,
3828                                                 username,
3829                                                 password,
3830                                                 presult);
3831 }
3832
3833 /****************************************************************************
3834   Get a the schannel session key out of an already opened netlogon pipe.
3835  ****************************************************************************/
3836 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3837                                                 struct cli_state *cli,
3838                                                 const char *domain,
3839                                                 uint32 *pneg_flags)
3840 {
3841         uint32 sec_chan_type = 0;
3842         unsigned char machine_pwd[16];
3843         const char *machine_account;
3844         NTSTATUS status;
3845
3846         /* Get the machine account credentials from secrets.tdb. */
3847         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3848                                &sec_chan_type))
3849         {
3850                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3851                         "trust account password for domain '%s'\n",
3852                         domain));
3853                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3854         }
3855
3856         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3857                                         cli->desthost, /* server name */
3858                                         domain,        /* domain */
3859                                         global_myname(), /* client name */
3860                                         machine_account, /* machine account name */
3861                                         machine_pwd,
3862                                         sec_chan_type,
3863                                         pneg_flags);
3864
3865         if (!NT_STATUS_IS_OK(status)) {
3866                 DEBUG(3, ("get_schannel_session_key_common: "
3867                           "rpccli_netlogon_setup_creds failed with result %s "
3868                           "to server %s, domain %s, machine account %s.\n",
3869                           nt_errstr(status), cli->desthost, domain,
3870                           machine_account ));
3871                 return status;
3872         }
3873
3874         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3875                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3876                         cli->desthost));
3877                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3878         }
3879
3880         return NT_STATUS_OK;;
3881 }
3882
3883 /****************************************************************************
3884  Open a netlogon pipe and get the schannel session key.
3885  Now exposed to external callers.
3886  ****************************************************************************/
3887
3888
3889 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3890                                   const char *domain,
3891                                   uint32 *pneg_flags,
3892                                   struct rpc_pipe_client **presult)
3893 {
3894         struct rpc_pipe_client *netlogon_pipe = NULL;
3895         NTSTATUS status;
3896
3897         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3898                                           &netlogon_pipe);
3899         if (!NT_STATUS_IS_OK(status)) {
3900                 return status;
3901         }
3902
3903         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3904                                                  pneg_flags);
3905         if (!NT_STATUS_IS_OK(status)) {
3906                 TALLOC_FREE(netlogon_pipe);
3907                 return status;
3908         }
3909
3910         *presult = netlogon_pipe;
3911         return NT_STATUS_OK;
3912 }
3913
3914 /****************************************************************************
3915  External interface.
3916  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3917  using session_key. sign and seal.
3918  ****************************************************************************/
3919
3920 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3921                                              const struct ndr_syntax_id *interface,
3922                                              enum pipe_auth_level auth_level,
3923                                              const char *domain,
3924                                              const struct dcinfo *pdc,
3925                                              struct rpc_pipe_client **presult)
3926 {
3927         struct rpc_pipe_client *result;
3928         struct cli_pipe_auth_data *auth;
3929         NTSTATUS status;
3930
3931         status = cli_rpc_pipe_open(cli, interface, &result);
3932         if (!NT_STATUS_IS_OK(status)) {
3933                 return status;
3934         }
3935
3936         status = rpccli_schannel_bind_data(result, domain, auth_level,
3937                                            pdc->sess_key, &auth);
3938         if (!NT_STATUS_IS_OK(status)) {
3939                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3940                           nt_errstr(status)));
3941                 TALLOC_FREE(result);
3942                 return status;
3943         }
3944
3945         status = rpc_pipe_bind(result, auth);
3946         if (!NT_STATUS_IS_OK(status)) {
3947                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3948                           "cli_rpc_pipe_bind failed with error %s\n",
3949                           nt_errstr(status) ));
3950                 TALLOC_FREE(result);
3951                 return status;
3952         }
3953
3954         /*
3955          * The credentials on a new netlogon pipe are the ones we are passed
3956          * in - copy them over.
3957          */
3958         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3959         if (result->dc == NULL) {
3960                 DEBUG(0, ("talloc failed\n"));
3961                 TALLOC_FREE(result);
3962                 return NT_STATUS_NO_MEMORY;
3963         }
3964
3965         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3966                 "for domain %s "
3967                 "and bound using schannel.\n",
3968                 result->trans.np.pipe_name, cli->desthost, domain ));
3969
3970         *presult = result;
3971         return NT_STATUS_OK;
3972 }
3973
3974 /****************************************************************************
3975  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3976  Fetch the session key ourselves using a temporary netlogon pipe. This
3977  version uses an ntlmssp auth bound netlogon pipe to get the key.
3978  ****************************************************************************/
3979
3980 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3981                                                       const char *domain,
3982                                                       const char *username,
3983                                                       const char *password,
3984                                                       uint32 *pneg_flags,
3985                                                       struct rpc_pipe_client **presult)
3986 {
3987         struct rpc_pipe_client *netlogon_pipe = NULL;
3988         NTSTATUS status;
3989
3990         status = cli_rpc_pipe_open_spnego_ntlmssp(
3991                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3992                 domain, username, password, &netlogon_pipe);
3993         if (!NT_STATUS_IS_OK(status)) {
3994                 return status;
3995         }
3996
3997         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3998                                                  pneg_flags);
3999         if (!NT_STATUS_IS_OK(status)) {
4000                 TALLOC_FREE(netlogon_pipe);
4001                 return status;
4002         }
4003
4004         *presult = netlogon_pipe;
4005         return NT_STATUS_OK;
4006 }
4007
4008 /****************************************************************************
4009  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4010  Fetch the session key ourselves using a temporary netlogon pipe. This version
4011  uses an ntlmssp bind to get the session key.
4012  ****************************************************************************/
4013
4014 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
4015                                                  const struct ndr_syntax_id *interface,
4016                                                  enum pipe_auth_level auth_level,
4017                                                  const char *domain,
4018                                                  const char *username,
4019                                                  const char *password,
4020                                                  struct rpc_pipe_client **presult)
4021 {
4022         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4023         struct rpc_pipe_client *netlogon_pipe = NULL;
4024         struct rpc_pipe_client *result = NULL;
4025         NTSTATUS status;
4026
4027         status = get_schannel_session_key_auth_ntlmssp(
4028                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4029         if (!NT_STATUS_IS_OK(status)) {
4030                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4031                         "key from server %s for domain %s.\n",
4032                         cli->desthost, domain ));
4033                 return status;
4034         }
4035
4036         status = cli_rpc_pipe_open_schannel_with_key(
4037                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4038                 &result);
4039
4040         /* Now we've bound using the session key we can close the netlog pipe. */
4041         TALLOC_FREE(netlogon_pipe);
4042
4043         if (NT_STATUS_IS_OK(status)) {
4044                 *presult = result;
4045         }
4046         return status;
4047 }
4048
4049 /****************************************************************************
4050  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4051  Fetch the session key ourselves using a temporary netlogon pipe.
4052  ****************************************************************************/
4053
4054 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4055                                     const struct ndr_syntax_id *interface,
4056                                     enum pipe_auth_level auth_level,
4057                                     const char *domain,
4058                                     struct rpc_pipe_client **presult)
4059 {
4060         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4061         struct rpc_pipe_client *netlogon_pipe = NULL;
4062         struct rpc_pipe_client *result = NULL;
4063         NTSTATUS status;
4064
4065         status = get_schannel_session_key(cli, domain, &neg_flags,
4066                                           &netlogon_pipe);
4067         if (!NT_STATUS_IS_OK(status)) {
4068                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4069                         "key from server %s for domain %s.\n",
4070                         cli->desthost, domain ));
4071                 return status;
4072         }
4073
4074         status = cli_rpc_pipe_open_schannel_with_key(
4075                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4076                 &result);
4077
4078         /* Now we've bound using the session key we can close the netlog pipe. */
4079         TALLOC_FREE(netlogon_pipe);
4080
4081         if (NT_STATUS_IS_OK(status)) {
4082                 *presult = result;
4083         }
4084
4085         return NT_STATUS_OK;
4086 }
4087
4088 /****************************************************************************
4089  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4090  The idea is this can be called with service_princ, username and password all
4091  NULL so long as the caller has a TGT.
4092  ****************************************************************************/
4093
4094 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4095                                 const struct ndr_syntax_id *interface,
4096                                 enum pipe_auth_level auth_level,
4097                                 const char *service_princ,
4098                                 const char *username,
4099                                 const char *password,
4100                                 struct rpc_pipe_client **presult)
4101 {
4102 #ifdef HAVE_KRB5
4103         struct rpc_pipe_client *result;
4104         struct cli_pipe_auth_data *auth;
4105         NTSTATUS status;
4106
4107         status = cli_rpc_pipe_open(cli, interface, &result);
4108         if (!NT_STATUS_IS_OK(status)) {
4109                 return status;
4110         }
4111
4112         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4113                                            username, password, &auth);
4114         if (!NT_STATUS_IS_OK(status)) {
4115                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4116                           nt_errstr(status)));
4117                 TALLOC_FREE(result);
4118                 return status;
4119         }
4120
4121         status = rpc_pipe_bind(result, auth);
4122         if (!NT_STATUS_IS_OK(status)) {
4123                 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
4124                           "with error %s\n", nt_errstr(status)));
4125                 TALLOC_FREE(result);
4126                 return status;
4127         }
4128
4129         *presult = result;
4130         return NT_STATUS_OK;
4131 #else
4132         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4133         return NT_STATUS_NOT_IMPLEMENTED;
4134 #endif
4135 }
4136
4137 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4138                              struct rpc_pipe_client *cli,
4139                              DATA_BLOB *session_key)
4140 {
4141         if (!session_key || !cli) {
4142                 return NT_STATUS_INVALID_PARAMETER;
4143         }
4144
4145         if (!cli->auth) {
4146                 return NT_STATUS_INVALID_PARAMETER;
4147         }
4148
4149         switch (cli->auth->auth_type) {
4150                 case PIPE_AUTH_TYPE_SCHANNEL:
4151                         *session_key = data_blob_talloc(mem_ctx,
4152                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4153                         break;
4154                 case PIPE_AUTH_TYPE_NTLMSSP:
4155                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4156                         *session_key = data_blob_talloc(mem_ctx,
4157                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4158                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4159                         break;
4160                 case PIPE_AUTH_TYPE_KRB5:
4161                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4162                         *session_key = data_blob_talloc(mem_ctx,
4163                                 cli->auth->a_u.kerberos_auth->session_key.data,
4164                                 cli->auth->a_u.kerberos_auth->session_key.length);
4165                         break;
4166                 case PIPE_AUTH_TYPE_NONE:
4167                         *session_key = data_blob_talloc(mem_ctx,
4168                                 cli->auth->user_session_key.data,
4169                                 cli->auth->user_session_key.length);
4170                         break;
4171                 default:
4172                         return NT_STATUS_NO_USER_SESSION_KEY;
4173         }
4174
4175         return NT_STATUS_OK;
4176 }
4177
4178 /**
4179  * Create a new RPC client context which uses a local dispatch function.
4180  */
4181 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx, const struct ndr_syntax_id *abstract_syntax, 
4182                                 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli, TALLOC_CTX *mem_ctx, const struct ndr_interface_table *table, uint32_t opnum, void *r),
4183                                 struct auth_serversupplied_info *serversupplied_info,
4184                                 struct rpc_pipe_client **presult)
4185 {
4186         struct rpc_pipe_client *result;
4187
4188         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
4189         if (result == NULL) {
4190                 return NT_STATUS_NO_MEMORY;
4191         }
4192
4193         result->transport_type = NCACN_INTERNAL; 
4194
4195         result->abstract_syntax = *abstract_syntax;
4196         result->transfer_syntax = ndr_transfer_syntax;
4197         result->dispatch = dispatch;
4198
4199         result->pipes_struct = TALLOC_ZERO_P(mem_ctx, pipes_struct);
4200         if (result->pipes_struct == NULL) {
4201                 TALLOC_FREE(result);
4202                 return NT_STATUS_NO_MEMORY;
4203         }
4204         result->pipes_struct->mem_ctx = mem_ctx;
4205         result->pipes_struct->server_info = serversupplied_info;
4206         result->pipes_struct->pipe_bound = true;
4207
4208         result->max_xmit_frag = -1;
4209         result->max_recv_frag = -1;
4210
4211         *presult = result;
4212         return NT_STATUS_OK;
4213 }
4214
4215