5a53c0d94097180797d1272599b65c3aa90aa280
[samba.git] / source3 / rpc_client / cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
/*
 * Static map from well-known named-pipe endpoints to the abstract
 * interface syntax (UUID + version) each one serves.  Lookups match on
 * the syntax, not the name; note that PIPE_LSARPC appears twice because
 * both lsarpc and dssetup are reached over the same endpoint.  The list
 * is terminated by a { NULL, NULL } sentinel.
 */
static const struct pipe_id_info {
	/* the names appear not to matter: the syntaxes _do_ matter */

	const char *client_pipe;
	const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
} pipe_names [] =
{
	{ PIPE_LSARPC,		&ndr_table_lsarpc.syntax_id },
	{ PIPE_LSARPC,		&ndr_table_dssetup.syntax_id },
	{ PIPE_SAMR,		&ndr_table_samr.syntax_id },
	{ PIPE_NETLOGON,	&ndr_table_netlogon.syntax_id },
	{ PIPE_SRVSVC,		&ndr_table_srvsvc.syntax_id },
	{ PIPE_WKSSVC,		&ndr_table_wkssvc.syntax_id },
	{ PIPE_WINREG,		&ndr_table_winreg.syntax_id },
	{ PIPE_SPOOLSS,		&syntax_spoolss },
	{ PIPE_NETDFS,		&ndr_table_netdfs.syntax_id },
	{ PIPE_ECHO,		&ndr_table_rpcecho.syntax_id },
	{ PIPE_SHUTDOWN,	&ndr_table_initshutdown.syntax_id },
	{ PIPE_SVCCTL,		&ndr_table_svcctl.syntax_id },
	{ PIPE_EVENTLOG,	&ndr_table_eventlog.syntax_id },
	{ PIPE_NTSVCS,		&ndr_table_ntsvcs.syntax_id },
	{ PIPE_EPMAPPER,	&ndr_table_epmapper.syntax_id },
	{ PIPE_DRSUAPI,		&ndr_table_drsuapi.syntax_id },
	{ NULL, NULL }
};
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85                                          const struct ndr_syntax_id *interface)
86 {
87         int i;
88         for (i = 0; pipe_names[i].client_pipe; i++) {
89                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
90                                         interface)) {
91                         return &pipe_names[i].client_pipe[5];
92                 }
93         }
94
95         /*
96          * Here we should ask \\epmapper, but for now our code is only
97          * interested in the known pipes mentioned in pipe_names[]
98          */
99
100         return NULL;
101 }
102
103 /********************************************************************
104  Map internal value to wire value.
105  ********************************************************************/
106
107 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
108 {
109         switch (auth_type) {
110
111         case PIPE_AUTH_TYPE_NONE:
112                 return RPC_ANONYMOUS_AUTH_TYPE;
113
114         case PIPE_AUTH_TYPE_NTLMSSP:
115                 return RPC_NTLMSSP_AUTH_TYPE;
116
117         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
118         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
119                 return RPC_SPNEGO_AUTH_TYPE;
120
121         case PIPE_AUTH_TYPE_SCHANNEL:
122                 return RPC_SCHANNEL_AUTH_TYPE;
123
124         case PIPE_AUTH_TYPE_KRB5:
125                 return RPC_KRB5_AUTH_TYPE;
126
127         default:
128                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
129                         "auth type %u\n",
130                         (unsigned int)auth_type ));
131                 break;
132         }
133         return -1;
134 }
135
136 /********************************************************************
137  Pipe description for a DEBUG
138  ********************************************************************/
139 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
140                                    struct rpc_pipe_client *cli)
141 {
142         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
143         if (result == NULL) {
144                 return "pipe";
145         }
146         return result;
147 }
148
149 /********************************************************************
150  Rpc pipe call id.
151  ********************************************************************/
152
153 static uint32 get_rpc_call_id(void)
154 {
155         static uint32 call_id = 0;
156         return ++call_id;
157 }
158
159 /*
160  * Realloc pdu to have a least "size" bytes
161  */
162
163 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
164 {
165         size_t extra_size;
166
167         if (prs_data_size(pdu) >= size) {
168                 return true;
169         }
170
171         extra_size = size - prs_data_size(pdu);
172
173         if (!prs_force_grow(pdu, extra_size)) {
174                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
175                           "%d bytes.\n", (int)extra_size));
176                 return false;
177         }
178
179         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
180                   (int)extra_size, prs_data_size(pdu)));
181         return true;
182 }
183
184
185 /*******************************************************************
186  Use SMBreadX to get rest of one fragment's worth of rpc data.
187  Reads the whole size or give an error message
188  ********************************************************************/
189
/* Per-request state for rpc_read_send/_recv: reads exactly "size"
 * bytes into "data", looping over partial transport reads. */
struct rpc_read_state {
	struct event_context *ev;		/* event loop for re-issued reads */
	struct rpc_cli_transport *transport;	/* underlying transport */
	uint8_t *data;				/* destination buffer (caller-owned) */
	size_t size;				/* total bytes to read */
	size_t num_read;			/* bytes received so far */
};
197
198 static void rpc_read_done(struct async_req *subreq);
199
200 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
201                                        struct event_context *ev,
202                                        struct rpc_cli_transport *transport,
203                                        uint8_t *data, size_t size)
204 {
205         struct async_req *result, *subreq;
206         struct rpc_read_state *state;
207
208         if (!async_req_setup(mem_ctx, &result, &state,
209                              struct rpc_read_state)) {
210                 return NULL;
211         }
212         state->ev = ev;
213         state->transport = transport;
214         state->data = data;
215         state->size = size;
216         state->num_read = 0;
217
218         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
219
220         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
221                                       transport->priv);
222         if (subreq == NULL) {
223                 goto fail;
224         }
225         subreq->async.fn = rpc_read_done;
226         subreq->async.priv = result;
227         return result;
228
229  fail:
230         TALLOC_FREE(result);
231         return NULL;
232 }
233
/* Completion callback for one transport read.  Accumulates the bytes
 * received and either finishes the parent request (all "size" bytes in)
 * or re-issues a read for the remainder. */
static void rpc_read_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_read_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_read_state);
	NTSTATUS status;
	ssize_t received;

	status = state->transport->read_recv(subreq, &received);
	TALLOC_FREE(subreq);	/* sub-request fully consumed */
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	state->num_read += received;
	if (state->num_read == state->size) {
		/* Got everything the caller asked for. */
		async_req_done(req);
		return;
	}

	/* Short read: ask the transport for the rest of the buffer. */
	subreq = state->transport->read_send(state, state->ev,
					     state->data + state->num_read,
					     state->size - state->num_read,
					     state->transport->priv);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_read_done;
	subreq->async.priv = req;
}
266
/* Collect the result of rpc_read_send(): NT_STATUS_OK once all
 * requested bytes have been read, or the first transport error. */
static NTSTATUS rpc_read_recv(struct async_req *req)
{
	return async_req_simple_recv(req);
}
271
/* Per-request state for rpc_write_send/_recv: writes exactly "size"
 * bytes from "data", looping over partial transport writes. */
struct rpc_write_state {
	struct event_context *ev;		/* event loop for re-issued writes */
	struct rpc_cli_transport *transport;	/* underlying transport */
	const uint8_t *data;			/* source buffer (caller-owned) */
	size_t size;				/* total bytes to write */
	size_t num_written;			/* bytes sent so far */
};
279
280 static void rpc_write_done(struct async_req *subreq);
281
282 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
283                                         struct event_context *ev,
284                                         struct rpc_cli_transport *transport,
285                                         const uint8_t *data, size_t size)
286 {
287         struct async_req *result, *subreq;
288         struct rpc_write_state *state;
289
290         if (!async_req_setup(mem_ctx, &result, &state,
291                              struct rpc_write_state)) {
292                 return NULL;
293         }
294         state->ev = ev;
295         state->transport = transport;
296         state->data = data;
297         state->size = size;
298         state->num_written = 0;
299
300         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
301
302         subreq = transport->write_send(state, ev, data, size, transport->priv);
303         if (subreq == NULL) {
304                 goto fail;
305         }
306         subreq->async.fn = rpc_write_done;
307         subreq->async.priv = result;
308         return result;
309  fail:
310         TALLOC_FREE(result);
311         return NULL;
312 }
313
/* Completion callback for one transport write.  Accumulates the bytes
 * sent and either finishes the parent request (all "size" bytes out)
 * or re-issues a write for the remainder. */
static void rpc_write_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_write_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_write_state);
	NTSTATUS status;
	ssize_t written;

	status = state->transport->write_recv(subreq, &written);
	TALLOC_FREE(subreq);	/* sub-request fully consumed */
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	state->num_written += written;

	if (state->num_written == state->size) {
		/* Everything has gone out; signal the caller. */
		async_req_done(req);
		return;
	}

	/* Short write: push the rest of the buffer. */
	subreq = state->transport->write_send(state, state->ev,
					      state->data + state->num_written,
					      state->size - state->num_written,
					      state->transport->priv);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_write_done;
	subreq->async.priv = req;
}
347
/* Collect the result of rpc_write_send(): NT_STATUS_OK once all
 * requested bytes have been written, or the first transport error. */
static NTSTATUS rpc_write_recv(struct async_req *req)
{
	return async_req_simple_recv(req);
}
352
353
354 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
355                                  struct rpc_hdr_info *prhdr,
356                                  prs_struct *pdu)
357 {
358         /*
359          * This next call sets the endian bit correctly in current_pdu. We
360          * will propagate this to rbuf later.
361          */
362
363         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
364                 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
365                 return NT_STATUS_BUFFER_TOO_SMALL;
366         }
367
368         if (prhdr->frag_len > cli->max_recv_frag) {
369                 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
370                           " we only allow %d\n", (int)prhdr->frag_len,
371                           (int)cli->max_recv_frag));
372                 return NT_STATUS_BUFFER_TOO_SMALL;
373         }
374
375         return NT_STATUS_OK;
376 }
377
378 /****************************************************************************
379  Try and get a PDU's worth of data from current_pdu. If not, then read more
380  from the wire.
381  ****************************************************************************/
382
/* State for get_complete_frag_send/_recv: ensures "pdu" holds one
 * complete RPC fragment (header parsed into *prhdr, frag_len bytes
 * present), reading from the wire as needed. */
struct get_complete_frag_state {
	struct event_context *ev;	/* event loop for wire reads */
	struct rpc_pipe_client *cli;	/* pipe to read from */
	struct rpc_hdr_info *prhdr;	/* parsed header output (caller-owned) */
	prs_struct *pdu;		/* buffer accumulating the fragment */
};
389
390 static void get_complete_frag_got_header(struct async_req *subreq);
391 static void get_complete_frag_got_rest(struct async_req *subreq);
392
/* Start assembling one complete RPC fragment into "pdu".
 *
 * Three cases, depending on how much data is already buffered:
 *  1. Less than a header: read up to RPC_HEADER_LEN first, then (in the
 *     got_header callback) parse it and read the rest of the fragment.
 *  2. Header present but fragment incomplete: parse the header now and
 *     read the remaining frag_len - pdu_len bytes.
 *  3. Fragment already complete: post NT_STATUS_OK immediately.
 * Errors (and case 3) are delivered through async_post_status so the
 * caller always gets its callback from the event loop. */
static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
					       struct event_context *ev,
					       struct rpc_pipe_client *cli,
					       struct rpc_hdr_info *prhdr,
					       prs_struct *pdu)
{
	struct async_req *result, *subreq;
	struct get_complete_frag_state *state;
	uint32_t pdu_len;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct get_complete_frag_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->prhdr = prhdr;
	state->pdu = pdu;

	pdu_len = prs_data_size(pdu);
	if (pdu_len < RPC_HEADER_LEN) {
		/* Case 1: not even a full header yet. */
		if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq = rpc_read_send(
			state, state->ev,
			state->cli->transport,
			(uint8_t *)(prs_data_p(state->pdu) + pdu_len),
			RPC_HEADER_LEN - pdu_len);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = get_complete_frag_got_header;
		subreq->async.priv = result;
		return result;
	}

	status = parse_rpc_header(cli, prhdr, pdu);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	/*
	 * Ensure we have frag_len bytes of data.
	 */
	if (pdu_len < prhdr->frag_len) {
		/* Case 2: header parsed, body (partially) missing. */
		if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq = rpc_read_send(state, state->ev,
				       state->cli->transport,
				       (uint8_t *)(prs_data_p(pdu) + pdu_len),
				       prhdr->frag_len - pdu_len);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = get_complete_frag_got_rest;
		subreq->async.priv = result;
		return result;
	}

	/* Case 3: fragment already complete. */
	status = NT_STATUS_OK;
 post_status:
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
467
/* Callback once RPC_HEADER_LEN bytes are buffered: parse the header,
 * grow the buffer to frag_len and read the remainder of the fragment. */
static void get_complete_frag_got_header(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct get_complete_frag_state *state = talloc_get_type_abort(
		req->private_data, struct get_complete_frag_state);
	NTSTATUS status;

	status = rpc_read_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	/* Header bytes are in; decode and validate them. */
	status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	/* Make room for the whole fragment as announced by frag_len. */
	if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
		async_req_error(req, NT_STATUS_NO_MEMORY);
		return;
	}

	/*
	 * We're here in this piece of code because we've read exactly
	 * RPC_HEADER_LEN bytes into state->pdu.
	 */

	subreq = rpc_read_send(
		state, state->ev, state->cli->transport,
		(uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
		state->prhdr->frag_len - RPC_HEADER_LEN);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = get_complete_frag_got_rest;
	subreq->async.priv = req;
}
509
510 static void get_complete_frag_got_rest(struct async_req *subreq)
511 {
512         struct async_req *req = talloc_get_type_abort(
513                 subreq->async.priv, struct async_req);
514         NTSTATUS status;
515
516         status = rpc_read_recv(subreq);
517         TALLOC_FREE(subreq);
518         if (!NT_STATUS_IS_OK(status)) {
519                 async_req_error(req, status);
520                 return;
521         }
522         async_req_done(req);
523 }
524
/* Collect the result of get_complete_frag_send(): NT_STATUS_OK when a
 * complete fragment sits in the pdu buffer, error status otherwise. */
static NTSTATUS get_complete_frag_recv(struct async_req *req)
{
	return async_req_simple_recv(req);
}
529
530 /****************************************************************************
531  NTLMSSP specific sign/seal.
532  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
533  In fact I should probably abstract these into identical pieces of code... JRA.
534  ****************************************************************************/
535
536 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
537                                 prs_struct *current_pdu,
538                                 uint8 *p_ss_padding_len)
539 {
540         RPC_HDR_AUTH auth_info;
541         uint32 save_offset = prs_offset(current_pdu);
542         uint32 auth_len = prhdr->auth_len;
543         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
544         unsigned char *data = NULL;
545         size_t data_len;
546         unsigned char *full_packet_data = NULL;
547         size_t full_packet_data_len;
548         DATA_BLOB auth_blob;
549         NTSTATUS status;
550
551         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
552             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
553                 return NT_STATUS_OK;
554         }
555
556         if (!ntlmssp_state) {
557                 return NT_STATUS_INVALID_PARAMETER;
558         }
559
560         /* Ensure there's enough data for an authenticated response. */
561         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
562                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
563                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
564                         (unsigned int)auth_len ));
565                 return NT_STATUS_BUFFER_TOO_SMALL;
566         }
567
568         /*
569          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
570          * after the RPC header.
571          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
572          * functions as NTLMv2 checks the rpc headers also.
573          */
574
575         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
576         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
577
578         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
579         full_packet_data_len = prhdr->frag_len - auth_len;
580
581         /* Pull the auth header and the following data into a blob. */
582         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
583                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
584                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
585                 return NT_STATUS_BUFFER_TOO_SMALL;
586         }
587
588         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
589                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
590                 return NT_STATUS_BUFFER_TOO_SMALL;
591         }
592
593         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
594         auth_blob.length = auth_len;
595
596         switch (cli->auth->auth_level) {
597                 case PIPE_AUTH_LEVEL_PRIVACY:
598                         /* Data is encrypted. */
599                         status = ntlmssp_unseal_packet(ntlmssp_state,
600                                                         data, data_len,
601                                                         full_packet_data,
602                                                         full_packet_data_len,
603                                                         &auth_blob);
604                         if (!NT_STATUS_IS_OK(status)) {
605                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
606                                         "packet from %s. Error was %s.\n",
607                                         rpccli_pipe_txt(debug_ctx(), cli),
608                                         nt_errstr(status) ));
609                                 return status;
610                         }
611                         break;
612                 case PIPE_AUTH_LEVEL_INTEGRITY:
613                         /* Data is signed. */
614                         status = ntlmssp_check_packet(ntlmssp_state,
615                                                         data, data_len,
616                                                         full_packet_data,
617                                                         full_packet_data_len,
618                                                         &auth_blob);
619                         if (!NT_STATUS_IS_OK(status)) {
620                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
621                                         "packet from %s. Error was %s.\n",
622                                         rpccli_pipe_txt(debug_ctx(), cli),
623                                         nt_errstr(status) ));
624                                 return status;
625                         }
626                         break;
627                 default:
628                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
629                                   "auth level %d\n", cli->auth->auth_level));
630                         return NT_STATUS_INVALID_INFO_CLASS;
631         }
632
633         /*
634          * Return the current pointer to the data offset.
635          */
636
637         if(!prs_set_offset(current_pdu, save_offset)) {
638                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
639                         (unsigned int)save_offset ));
640                 return NT_STATUS_BUFFER_TOO_SMALL;
641         }
642
643         /*
644          * Remember the padding length. We must remove it from the real data
645          * stream once the sign/seal is done.
646          */
647
648         *p_ss_padding_len = auth_info.auth_pad_len;
649
650         return NT_STATUS_OK;
651 }
652
653 /****************************************************************************
654  schannel specific sign/seal.
655  ****************************************************************************/
656
657 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
658                                 prs_struct *current_pdu,
659                                 uint8 *p_ss_padding_len)
660 {
661         RPC_HDR_AUTH auth_info;
662         RPC_AUTH_SCHANNEL_CHK schannel_chk;
663         uint32 auth_len = prhdr->auth_len;
664         uint32 save_offset = prs_offset(current_pdu);
665         struct schannel_auth_struct *schannel_auth =
666                 cli->auth->a_u.schannel_auth;
667         uint32 data_len;
668
669         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
670             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
671                 return NT_STATUS_OK;
672         }
673
674         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
675                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
676                 return NT_STATUS_INVALID_PARAMETER;
677         }
678
679         if (!schannel_auth) {
680                 return NT_STATUS_INVALID_PARAMETER;
681         }
682
683         /* Ensure there's enough data for an authenticated response. */
684         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
685                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
686                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
687                         (unsigned int)auth_len ));
688                 return NT_STATUS_INVALID_PARAMETER;
689         }
690
691         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
692
693         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
694                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
695                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
696                 return NT_STATUS_BUFFER_TOO_SMALL;
697         }
698
699         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
700                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
701                 return NT_STATUS_BUFFER_TOO_SMALL;
702         }
703
704         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
705                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
706                         auth_info.auth_type));
707                 return NT_STATUS_BUFFER_TOO_SMALL;
708         }
709
710         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
711                                 &schannel_chk, current_pdu, 0)) {
712                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
713                 return NT_STATUS_BUFFER_TOO_SMALL;
714         }
715
716         if (!schannel_decode(schannel_auth,
717                         cli->auth->auth_level,
718                         SENDER_IS_ACCEPTOR,
719                         &schannel_chk,
720                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
721                         data_len)) {
722                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
723                                 "Connection to %s.\n",
724                                 rpccli_pipe_txt(debug_ctx(), cli)));
725                 return NT_STATUS_INVALID_PARAMETER;
726         }
727
728         /* The sequence number gets incremented on both send and receive. */
729         schannel_auth->seq_num++;
730
731         /*
732          * Return the current pointer to the data offset.
733          */
734
735         if(!prs_set_offset(current_pdu, save_offset)) {
736                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
737                         (unsigned int)save_offset ));
738                 return NT_STATUS_BUFFER_TOO_SMALL;
739         }
740
741         /*
742          * Remember the padding length. We must remove it from the real data
743          * stream once the sign/seal is done.
744          */
745
746         *p_ss_padding_len = auth_info.auth_pad_len;
747
748         return NT_STATUS_OK;
749 }
750
751 /****************************************************************************
752  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
753  ****************************************************************************/
754
/* Dispatch the auth verification for one incoming response PDU based
 * on the negotiated auth type.  Performs integer-overflow paranoia on
 * auth_len first, then hands off to the NTLMSSP or schannel verifier.
 * *p_ss_padding_len is set by the verifier (untouched for
 * PIPE_AUTH_TYPE_NONE). */
static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
				prs_struct *current_pdu,
				uint8 *p_ss_padding_len)
{
	NTSTATUS ret = NT_STATUS_OK;

	/* Paranoia checks for auth_len. */
	if (prhdr->auth_len) {
		if (prhdr->auth_len > prhdr->frag_len) {
			return NT_STATUS_INVALID_PARAMETER;
		}

		if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
				prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
			/* Integer wrap attempt. */
			return NT_STATUS_INVALID_PARAMETER;
		}
	}

	/*
	 * Now we have a complete RPC request PDU fragment, try and verify any auth data.
	 */

	switch(cli->auth->auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			/* An unauthenticated pipe must not carry auth data. */
			if (prhdr->auth_len) {
				DEBUG(3, ("cli_pipe_validate_rpc_response: "
					  "Connection to %s - got non-zero "
					  "auth len %u.\n",
					rpccli_pipe_txt(debug_ctx(), cli),
					(unsigned int)prhdr->auth_len ));
				return NT_STATUS_INVALID_PARAMETER;
			}
			break;

		case PIPE_AUTH_TYPE_NTLMSSP:
		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
			ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
			if (!NT_STATUS_IS_OK(ret)) {
				return ret;
			}
			break;

		case PIPE_AUTH_TYPE_SCHANNEL:
			ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
			if (!NT_STATUS_IS_OK(ret)) {
				return ret;
			}
			break;

		case PIPE_AUTH_TYPE_KRB5:
		case PIPE_AUTH_TYPE_SPNEGO_KRB5:
		default:
			/* KRB5 verification is not implemented here. */
			DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
				  "to %s - unknown internal auth type %u.\n",
				  rpccli_pipe_txt(debug_ctx(), cli),
				  cli->auth->auth_type ));
			return NT_STATUS_INVALID_INFO_CLASS;
	}

	return NT_STATUS_OK;
}
817
818 /****************************************************************************
819  Do basic authentication checks on an incoming pdu.
820  ****************************************************************************/
821
822 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
823                         prs_struct *current_pdu,
824                         uint8 expected_pkt_type,
825                         char **ppdata,
826                         uint32 *pdata_len,
827                         prs_struct *return_data)
828 {
829
830         NTSTATUS ret = NT_STATUS_OK;
831         uint32 current_pdu_len = prs_data_size(current_pdu);
832
833         if (current_pdu_len != prhdr->frag_len) {
834                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
835                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
836                 return NT_STATUS_INVALID_PARAMETER;
837         }
838
839         /*
840          * Point the return values at the real data including the RPC
841          * header. Just in case the caller wants it.
842          */
843         *ppdata = prs_data_p(current_pdu);
844         *pdata_len = current_pdu_len;
845
846         /* Ensure we have the correct type. */
847         switch (prhdr->pkt_type) {
848                 case RPC_ALTCONTRESP:
849                 case RPC_BINDACK:
850
851                         /* Alter context and bind ack share the same packet definitions. */
852                         break;
853
854
855                 case RPC_RESPONSE:
856                 {
857                         RPC_HDR_RESP rhdr_resp;
858                         uint8 ss_padding_len = 0;
859
860                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
861                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
862                                 return NT_STATUS_BUFFER_TOO_SMALL;
863                         }
864
865                         /* Here's where we deal with incoming sign/seal. */
866                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
867                                         current_pdu, &ss_padding_len);
868                         if (!NT_STATUS_IS_OK(ret)) {
869                                 return ret;
870                         }
871
872                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
873                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
874
875                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
876                                 return NT_STATUS_BUFFER_TOO_SMALL;
877                         }
878
879                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
880
881                         /* Remember to remove the auth footer. */
882                         if (prhdr->auth_len) {
883                                 /* We've already done integer wrap tests on auth_len in
884                                         cli_pipe_validate_rpc_response(). */
885                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
886                                         return NT_STATUS_BUFFER_TOO_SMALL;
887                                 }
888                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
889                         }
890
891                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
892                                 current_pdu_len, *pdata_len, ss_padding_len ));
893
894                         /*
895                          * If this is the first reply, and the allocation hint is reasonably, try and
896                          * set up the return_data parse_struct to the correct size.
897                          */
898
899                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
900                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
901                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
902                                                 "too large to allocate\n",
903                                                 (unsigned int)rhdr_resp.alloc_hint ));
904                                         return NT_STATUS_NO_MEMORY;
905                                 }
906                         }
907
908                         break;
909                 }
910
911                 case RPC_BINDNACK:
912                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
913                                   "received from %s!\n",
914                                   rpccli_pipe_txt(debug_ctx(), cli)));
915                         /* Use this for now... */
916                         return NT_STATUS_NETWORK_ACCESS_DENIED;
917
918                 case RPC_FAULT:
919                 {
920                         RPC_HDR_RESP rhdr_resp;
921                         RPC_HDR_FAULT fault_resp;
922
923                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
924                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
925                                 return NT_STATUS_BUFFER_TOO_SMALL;
926                         }
927
928                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
929                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
930                                 return NT_STATUS_BUFFER_TOO_SMALL;
931                         }
932
933                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
934                                   "code %s received from %s!\n",
935                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
936                                 rpccli_pipe_txt(debug_ctx(), cli)));
937                         if (NT_STATUS_IS_OK(fault_resp.status)) {
938                                 return NT_STATUS_UNSUCCESSFUL;
939                         } else {
940                                 return fault_resp.status;
941                         }
942                 }
943
944                 default:
945                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
946                                 "from %s!\n",
947                                 (unsigned int)prhdr->pkt_type,
948                                 rpccli_pipe_txt(debug_ctx(), cli)));
949                         return NT_STATUS_INVALID_INFO_CLASS;
950         }
951
952         if (prhdr->pkt_type != expected_pkt_type) {
953                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
954                           "got an unexpected RPC packet type - %u, not %u\n",
955                         rpccli_pipe_txt(debug_ctx(), cli),
956                         prhdr->pkt_type,
957                         expected_pkt_type));
958                 return NT_STATUS_INVALID_INFO_CLASS;
959         }
960
961         /* Do this just before return - we don't want to modify any rpc header
962            data before now as we may have needed to do cryptographic actions on
963            it before. */
964
965         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
966                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
967                         "setting fragment first/last ON.\n"));
968                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
969         }
970
971         return NT_STATUS_OK;
972 }
973
974 /****************************************************************************
975  Ensure we eat the just processed pdu from the current_pdu prs_struct.
976  Normally the frag_len and buffer size will match, but on the first trans
977  reply there is a theoretical chance that buffer size > frag_len, so we must
978  deal with that.
979  ****************************************************************************/
980
981 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
982 {
983         uint32 current_pdu_len = prs_data_size(current_pdu);
984
985         if (current_pdu_len < prhdr->frag_len) {
986                 return NT_STATUS_BUFFER_TOO_SMALL;
987         }
988
989         /* Common case. */
990         if (current_pdu_len == (uint32)prhdr->frag_len) {
991                 prs_mem_free(current_pdu);
992                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
993                 /* Make current_pdu dynamic with no memory. */
994                 prs_give_memory(current_pdu, 0, 0, True);
995                 return NT_STATUS_OK;
996         }
997
998         /*
999          * Oh no ! More data in buffer than we processed in current pdu.
1000          * Cheat. Move the data down and shrink the buffer.
1001          */
1002
1003         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1004                         current_pdu_len - prhdr->frag_len);
1005
1006         /* Remember to set the read offset back to zero. */
1007         prs_set_offset(current_pdu, 0);
1008
1009         /* Shrink the buffer. */
1010         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1011                 return NT_STATUS_BUFFER_TOO_SMALL;
1012         }
1013
1014         return NT_STATUS_OK;
1015 }
1016
1017 /****************************************************************************
1018  Call a remote api on an arbitrary pipe.  takes param, data and setup buffers.
1019 ****************************************************************************/
1020
/* Per-request state for cli_api_pipe_send/recv. */
struct cli_api_pipe_state {
	struct event_context *ev;		/* Event context driving the async steps. */
	struct rpc_cli_transport *transport;	/* Transport used for trans or write/read. */
	uint8_t *rdata;				/* Talloc'ed reply buffer, owned by this state. */
	uint32_t rdata_len;			/* Number of valid bytes in rdata. */
};
1027
1028 static void cli_api_pipe_trans_done(struct async_req *subreq);
1029 static void cli_api_pipe_write_done(struct async_req *subreq);
1030 static void cli_api_pipe_read_done(struct async_req *subreq);
1031
1032 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1033                                            struct event_context *ev,
1034                                            struct rpc_cli_transport *transport,
1035                                            uint8_t *data, size_t data_len,
1036                                            uint32_t max_rdata_len)
1037 {
1038         struct async_req *result, *subreq;
1039         struct cli_api_pipe_state *state;
1040         NTSTATUS status;
1041
1042         if (!async_req_setup(mem_ctx, &result, &state,
1043                              struct cli_api_pipe_state)) {
1044                 return NULL;
1045         }
1046         state->ev = ev;
1047         state->transport = transport;
1048
1049         if (max_rdata_len < RPC_HEADER_LEN) {
1050                 /*
1051                  * For a RPC reply we always need at least RPC_HEADER_LEN
1052                  * bytes. We check this here because we will receive
1053                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1054                  */
1055                 status = NT_STATUS_INVALID_PARAMETER;
1056                 goto post_status;
1057         }
1058
1059         if (transport->trans_send != NULL) {
1060                 subreq = transport->trans_send(state, ev, data, data_len,
1061                                                max_rdata_len, transport->priv);
1062                 if (subreq == NULL) {
1063                         status = NT_STATUS_NO_MEMORY;
1064                         goto post_status;
1065                 }
1066                 subreq->async.fn = cli_api_pipe_trans_done;
1067                 subreq->async.priv = result;
1068                 return result;
1069         }
1070
1071         /*
1072          * If the transport does not provide a "trans" routine, i.e. for
1073          * example the ncacn_ip_tcp transport, do the write/read step here.
1074          */
1075
1076         subreq = rpc_write_send(state, ev, transport, data, data_len);
1077         if (subreq == NULL) {
1078                 goto fail;
1079         }
1080         subreq->async.fn = cli_api_pipe_write_done;
1081         subreq->async.priv = result;
1082         return result;
1083
1084         status = NT_STATUS_INVALID_PARAMETER;
1085
1086  post_status:
1087         if (async_post_status(result, ev, status)) {
1088                 return result;
1089         }
1090  fail:
1091         TALLOC_FREE(result);
1092         return NULL;
1093 }
1094
1095 static void cli_api_pipe_trans_done(struct async_req *subreq)
1096 {
1097         struct async_req *req = talloc_get_type_abort(
1098                 subreq->async.priv, struct async_req);
1099         struct cli_api_pipe_state *state = talloc_get_type_abort(
1100                 req->private_data, struct cli_api_pipe_state);
1101         NTSTATUS status;
1102
1103         status = state->transport->trans_recv(subreq, state, &state->rdata,
1104                                               &state->rdata_len);
1105         TALLOC_FREE(subreq);
1106         if (!NT_STATUS_IS_OK(status)) {
1107                 async_req_error(req, status);
1108                 return;
1109         }
1110         async_req_done(req);
1111 }
1112
/* The request PDU has been written on a transport without a trans call -
   allocate a header-sized buffer and start reading the reply. */
static void cli_api_pipe_write_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct cli_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct cli_api_pipe_state);
	NTSTATUS status;

	status = rpc_write_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	/* Reply buffer is owned by state and handed out in cli_api_pipe_recv(). */
	state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
	if (async_req_nomem(state->rdata, req)) {
		return;
	}

	/*
	 * We don't need to use rpc_read_send here, the upper layer will cope
	 * with a short read, transport->trans_send could also return less
	 * than state->max_rdata_len.
	 */
	subreq = state->transport->read_send(state, state->ev, state->rdata,
					     RPC_HEADER_LEN,
					     state->transport->priv);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = cli_api_pipe_read_done;
	subreq->async.priv = req;
}
1147
1148 static void cli_api_pipe_read_done(struct async_req *subreq)
1149 {
1150         struct async_req *req = talloc_get_type_abort(
1151                 subreq->async.priv, struct async_req);
1152         struct cli_api_pipe_state *state = talloc_get_type_abort(
1153                 req->private_data, struct cli_api_pipe_state);
1154         NTSTATUS status;
1155         ssize_t received;
1156
1157         status = state->transport->read_recv(subreq, &received);
1158         TALLOC_FREE(subreq);
1159         if (!NT_STATUS_IS_OK(status)) {
1160                 async_req_error(req, status);
1161                 return;
1162         }
1163         state->rdata_len = received;
1164         async_req_done(req);
1165 }
1166
1167 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1168                                   uint8_t **prdata, uint32_t *prdata_len)
1169 {
1170         struct cli_api_pipe_state *state = talloc_get_type_abort(
1171                 req->private_data, struct cli_api_pipe_state);
1172         NTSTATUS status;
1173
1174         if (async_req_is_error(req, &status)) {
1175                 return status;
1176         }
1177
1178         *prdata = talloc_move(mem_ctx, &state->rdata);
1179         *prdata_len = state->rdata_len;
1180         return NT_STATUS_OK;
1181 }
1182
1183 /****************************************************************************
1184  Send data on an rpc pipe via trans. The prs_struct data must be the last
1185  pdu fragment of an NDR data stream.
1186
1187  Receive response data from an rpc pipe, which may be large...
1188
1189  Read the first fragment: unfortunately have to use SMBtrans for the first
1190  bit, then SMBreadX for subsequent bits.
1191
1192  If first fragment received also wasn't the last fragment, continue
1193  getting fragments until we _do_ receive the last fragment.
1194
 Request/Response PDUs look like the following...
1196
1197  |<------------------PDU len----------------------------------------------->|
1198  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1199
1200  +------------+-----------------+-------------+---------------+-------------+
1201  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1202  +------------+-----------------+-------------+---------------+-------------+
1203
1204  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1205  signing & sealing being negotiated.
1206
1207  ****************************************************************************/
1208
/* Per-request state for rpc_api_pipe_send/recv: fragment reassembly. */
struct rpc_api_pipe_state {
	struct event_context *ev;	/* Event context driving the async steps. */
	struct rpc_pipe_client *cli;	/* The pipe we're talking to. */
	uint8_t expected_pkt_type;	/* Packet type we expect in the reply. */

	prs_struct incoming_frag;	/* Current (possibly partial) fragment. */
	struct rpc_hdr_info rhdr;	/* Parsed RPC header of incoming_frag. */

	prs_struct incoming_pdu;	/* Incoming reply - payloads of all fragments concatenated. */
	uint32_t incoming_pdu_offset;	/* Write offset into incoming_pdu. */
};
1220
1221 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1222 {
1223         prs_mem_free(&state->incoming_frag);
1224         prs_mem_free(&state->incoming_pdu);
1225         return 0;
1226 }
1227
1228 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1229 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1230
/****************************************************************************
 Send one outgoing PDU and drive the receive/reassembly of the reply
 fragments. The reassembled reply is collected with rpc_api_pipe_recv().
 ****************************************************************************/

static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct rpc_pipe_client *cli,
					   prs_struct *data, /* Outgoing PDU */
					   uint8_t expected_pkt_type)
{
	struct async_req *result, *subreq;
	struct rpc_api_pipe_state *state;
	uint16_t max_recv_frag;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_api_pipe_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->expected_pkt_type = expected_pkt_type;
	state->incoming_pdu_offset = 0;

	prs_init_empty(&state->incoming_frag, state, UNMARSHALL);

	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
	/* Make incoming_pdu dynamic with no memory. */
	prs_give_memory(&state->incoming_pdu, 0, 0, true);

	talloc_set_destructor(state, rpc_api_pipe_state_destructor);

	/*
	 * Ensure we're not sending too much.
	 */
	if (prs_offset(data) > cli->max_xmit_frag) {
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));

	max_recv_frag = cli->max_recv_frag;

#ifdef DEVELOPER
	/* Developer builds deliberately force tiny, randomly-sized receive
	   fragments to exercise the fragment reassembly code. */
	max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
#endif

	subreq = cli_api_pipe_send(state, ev, cli->transport,
				   (uint8_t *)prs_data_p(data),
				   prs_offset(data), max_recv_frag);
	if (subreq == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}
	subreq->async.fn = rpc_api_pipe_trans_done;
	subreq->async.priv = result;
	return result;

 post_status:
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
1293
/* First round trip completed - stash whatever reply data came back into
   incoming_frag and make sure we have at least one complete fragment
   before validating it in rpc_api_pipe_got_pdu(). */
static void rpc_api_pipe_trans_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;
	uint8_t *rdata = NULL;
	uint32_t rdata_len = 0;
	char *rdata_copy;

	status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
		async_req_error(req, status);
		return;
	}

	/* An empty reply is treated as success with no data. */
	if (rdata == NULL) {
		DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
			 rpccli_pipe_txt(debug_ctx(), state->cli)));
		async_req_done(req);
		return;
	}

	/*
	 * Give the memory received from cli_trans as dynamic to the current
	 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
	 * :-(
	 */
	rdata_copy = (char *)memdup(rdata, rdata_len);
	TALLOC_FREE(rdata);
	if (async_req_nomem(rdata_copy, req)) {
		return;
	}
	/* incoming_frag now owns rdata_copy (dynamic=true -> freed by prs). */
	prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);

	/* Ensure we have enough data for a pdu. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_api_pipe_got_pdu;
	subreq->async.priv = req;
}
1341
/* A complete fragment has arrived: validate it (type and sign/seal checks),
   append its NDR payload to incoming_pdu, then either finish (last
   fragment) or go back for the next one. */
static void rpc_api_pipe_got_pdu(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;
	char *rdata = NULL;
	uint32_t rdata_len = 0;

	status = get_complete_frag_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("get_complete_frag failed: %s\n",
			  nt_errstr(status)));
		async_req_error(req, status);
		return;
	}

	/* On success rdata/rdata_len point at the fragment's NDR payload. */
	status = cli_pipe_validate_current_pdu(
		state->cli, &state->rhdr, &state->incoming_frag,
		state->expected_pkt_type, &rdata, &rdata_len,
		&state->incoming_pdu);

	DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
		  (unsigned)prs_data_size(&state->incoming_frag),
		  (unsigned)state->incoming_pdu_offset,
		  nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	if ((state->rhdr.flags & RPC_FLG_FIRST)
	    && (state->rhdr.pack_type[0] == 0)) {
		/*
		 * Set the data type correctly for big-endian data on the
		 * first packet.
		 */
		DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
			  "big-endian.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli)));
		prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
	}
	/*
	 * Check endianness on subsequent packets.
	 */
	if (state->incoming_frag.bigendian_data
	    != state->incoming_pdu.bigendian_data) {
		DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
			 "%s\n",
			 state->incoming_pdu.bigendian_data?"big":"little",
			 state->incoming_frag.bigendian_data?"big":"little"));
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	/* Now copy the data portion out of the pdu into rbuf. */
	if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
		async_req_error(req, NT_STATUS_NO_MEMORY);
		return;
	}

	memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
	       rdata, (size_t)rdata_len);
	state->incoming_pdu_offset += rdata_len;

	/* Eat the processed fragment, keeping any extra bytes already read. */
	status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
					    &state->incoming_frag);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	if (state->rhdr.flags & RPC_FLG_LAST) {
		DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  (unsigned)prs_data_size(&state->incoming_pdu)));
		async_req_done(req);
		return;
	}

	/* Not the last fragment - loop to fetch the next one. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_api_pipe_got_pdu;
	subreq->async.priv = req;
}
1433
/* Collect the result of rpc_api_pipe_send. On success *reply_pdu takes
   over the reassembled PDU data; its memory now belongs to mem_ctx. */
static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
				  prs_struct *reply_pdu)
{
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;

	if (async_req_is_error(req, &status)) {
		return status;
	}

	/* Shallow-copy the prs_struct, then re-parent its buffer. */
	*reply_pdu = state->incoming_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->incoming_pdu from being freed in
	 * rpc_api_pipe_state_destructor()
	 */
	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
1456
1457 /*******************************************************************
1458  Creates krb5 auth bind.
1459  ********************************************************************/
1460
1461 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1462                                                 enum pipe_auth_level auth_level,
1463                                                 RPC_HDR_AUTH *pauth_out,
1464                                                 prs_struct *auth_data)
1465 {
1466 #ifdef HAVE_KRB5
1467         int ret;
1468         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1469         DATA_BLOB tkt = data_blob_null;
1470         DATA_BLOB tkt_wrapped = data_blob_null;
1471
1472         /* We may change the pad length before marshalling. */
1473         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1474
1475         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1476                 a->service_principal ));
1477
1478         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1479
1480         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1481                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1482
1483         if (ret) {
1484                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1485                         "failed with %s\n",
1486                         a->service_principal,
1487                         error_message(ret) ));
1488
1489                 data_blob_free(&tkt);
1490                 prs_mem_free(auth_data);
1491                 return NT_STATUS_INVALID_PARAMETER;
1492         }
1493
1494         /* wrap that up in a nice GSS-API wrapping */
1495         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1496
1497         data_blob_free(&tkt);
1498
1499         /* Auth len in the rpc header doesn't include auth_header. */
1500         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1501                 data_blob_free(&tkt_wrapped);
1502                 prs_mem_free(auth_data);
1503                 return NT_STATUS_NO_MEMORY;
1504         }
1505
1506         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1507         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1508
1509         data_blob_free(&tkt_wrapped);
1510         return NT_STATUS_OK;
1511 #else
1512         return NT_STATUS_INVALID_PARAMETER;
1513 #endif
1514 }
1515
1516 /*******************************************************************
1517  Creates SPNEGO NTLMSSP auth bind.
1518  ********************************************************************/
1519
1520 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1521                                                 enum pipe_auth_level auth_level,
1522                                                 RPC_HDR_AUTH *pauth_out,
1523                                                 prs_struct *auth_data)
1524 {
1525         NTSTATUS nt_status;
1526         DATA_BLOB null_blob = data_blob_null;
1527         DATA_BLOB request = data_blob_null;
1528         DATA_BLOB spnego_msg = data_blob_null;
1529
1530         /* We may change the pad length before marshalling. */
1531         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1532
1533         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1534         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1535                                         null_blob,
1536                                         &request);
1537
1538         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1539                 data_blob_free(&request);
1540                 prs_mem_free(auth_data);
1541                 return nt_status;
1542         }
1543
1544         /* Wrap this in SPNEGO. */
1545         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1546
1547         data_blob_free(&request);
1548
1549         /* Auth len in the rpc header doesn't include auth_header. */
1550         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1551                 data_blob_free(&spnego_msg);
1552                 prs_mem_free(auth_data);
1553                 return NT_STATUS_NO_MEMORY;
1554         }
1555
1556         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1557         dump_data(5, spnego_msg.data, spnego_msg.length);
1558
1559         data_blob_free(&spnego_msg);
1560         return NT_STATUS_OK;
1561 }
1562
1563 /*******************************************************************
1564  Creates NTLMSSP auth bind.
1565  ********************************************************************/
1566
1567 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1568                                                 enum pipe_auth_level auth_level,
1569                                                 RPC_HDR_AUTH *pauth_out,
1570                                                 prs_struct *auth_data)
1571 {
1572         NTSTATUS nt_status;
1573         DATA_BLOB null_blob = data_blob_null;
1574         DATA_BLOB request = data_blob_null;
1575
1576         /* We may change the pad length before marshalling. */
1577         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1578
1579         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1580         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1581                                         null_blob,
1582                                         &request);
1583
1584         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1585                 data_blob_free(&request);
1586                 prs_mem_free(auth_data);
1587                 return nt_status;
1588         }
1589
1590         /* Auth len in the rpc header doesn't include auth_header. */
1591         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1592                 data_blob_free(&request);
1593                 prs_mem_free(auth_data);
1594                 return NT_STATUS_NO_MEMORY;
1595         }
1596
1597         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1598         dump_data(5, request.data, request.length);
1599
1600         data_blob_free(&request);
1601         return NT_STATUS_OK;
1602 }
1603
1604 /*******************************************************************
1605  Creates schannel auth bind.
1606  ********************************************************************/
1607
1608 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1609                                                 enum pipe_auth_level auth_level,
1610                                                 RPC_HDR_AUTH *pauth_out,
1611                                                 prs_struct *auth_data)
1612 {
1613         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1614
1615         /* We may change the pad length before marshalling. */
1616         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1617
1618         /* Use lp_workgroup() if domain not specified */
1619
1620         if (!cli->auth->domain || !cli->auth->domain[0]) {
1621                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1622                 if (cli->auth->domain == NULL) {
1623                         return NT_STATUS_NO_MEMORY;
1624                 }
1625         }
1626
1627         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1628                                    global_myname());
1629
1630         /*
1631          * Now marshall the data into the auth parse_struct.
1632          */
1633
1634         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1635                                        &schannel_neg, auth_data, 0)) {
1636                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1637                 prs_mem_free(auth_data);
1638                 return NT_STATUS_NO_MEMORY;
1639         }
1640
1641         return NT_STATUS_OK;
1642 }
1643
1644 /*******************************************************************
1645  Creates the internals of a DCE/RPC bind request or alter context PDU.
1646  ********************************************************************/
1647
static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
						prs_struct *rpc_out, 
						uint32 rpc_call_id,
						const RPC_IFACE *abstract,
						const RPC_IFACE *transfer,
						RPC_HDR_AUTH *phdr_auth,
						prs_struct *pauth_info)
{
	RPC_HDR hdr;
	RPC_HDR_RB hdr_rb;
	RPC_CONTEXT rpc_ctx;
	/* pauth_info already holds the marshalled auth trailer (if any);
	 * its offset is the auth length for the RPC header. */
	uint16 auth_len = prs_offset(pauth_info);
	uint8 ss_padding_len = 0;
	uint16 frag_len = 0;

	/* create the RPC context. */
	init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);

	/* create the bind request RPC_HDR_RB */
	init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);

	/* Start building the frag length. */
	frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);

	/* Do we need to pad ? The auth trailer must start on an
	 * 8-byte boundary, so pad the bind data out to that. */
	if (auth_len) {
		uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
		if (data_len % 8) {
			ss_padding_len = 8 - (data_len % 8);
			/* Record the pad in the auth header marshalled below. */
			phdr_auth->auth_pad_len = ss_padding_len;
		}
		frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
	}

	/* Create the request RPC_HDR */
	init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);

	/* Marshall the RPC header */
	if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/* Marshall the bind request data */
	if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Grow the outgoing buffer to store any auth info:
	 * padding, then auth header, then the trailer itself.
	 */

	if(auth_len != 0) {
		if (ss_padding_len) {
			char pad[8];
			memset(pad, '\0', 8);
			if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
				DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
				return NT_STATUS_NO_MEMORY;
			}
		}

		if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
			return NT_STATUS_NO_MEMORY;
		}


		if(!prs_append_prs_data( rpc_out, pauth_info)) {
			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
			return NT_STATUS_NO_MEMORY;
		}
	}

	return NT_STATUS_OK;
}
1725
1726 /*******************************************************************
1727  Creates a DCE/RPC bind request.
1728  ********************************************************************/
1729
1730 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1731                                 prs_struct *rpc_out, 
1732                                 uint32 rpc_call_id,
1733                                 const RPC_IFACE *abstract,
1734                                 const RPC_IFACE *transfer,
1735                                 enum pipe_auth_type auth_type,
1736                                 enum pipe_auth_level auth_level)
1737 {
1738         RPC_HDR_AUTH hdr_auth;
1739         prs_struct auth_info;
1740         NTSTATUS ret = NT_STATUS_OK;
1741
1742         ZERO_STRUCT(hdr_auth);
1743         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1744                 return NT_STATUS_NO_MEMORY;
1745
1746         switch (auth_type) {
1747                 case PIPE_AUTH_TYPE_SCHANNEL:
1748                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1749                         if (!NT_STATUS_IS_OK(ret)) {
1750                                 prs_mem_free(&auth_info);
1751                                 return ret;
1752                         }
1753                         break;
1754
1755                 case PIPE_AUTH_TYPE_NTLMSSP:
1756                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1757                         if (!NT_STATUS_IS_OK(ret)) {
1758                                 prs_mem_free(&auth_info);
1759                                 return ret;
1760                         }
1761                         break;
1762
1763                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1764                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1765                         if (!NT_STATUS_IS_OK(ret)) {
1766                                 prs_mem_free(&auth_info);
1767                                 return ret;
1768                         }
1769                         break;
1770
1771                 case PIPE_AUTH_TYPE_KRB5:
1772                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1773                         if (!NT_STATUS_IS_OK(ret)) {
1774                                 prs_mem_free(&auth_info);
1775                                 return ret;
1776                         }
1777                         break;
1778
1779                 case PIPE_AUTH_TYPE_NONE:
1780                         break;
1781
1782                 default:
1783                         /* "Can't" happen. */
1784                         return NT_STATUS_INVALID_INFO_CLASS;
1785         }
1786
1787         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1788                                                 rpc_out, 
1789                                                 rpc_call_id,
1790                                                 abstract,
1791                                                 transfer,
1792                                                 &hdr_auth,
1793                                                 &auth_info);
1794
1795         prs_mem_free(&auth_info);
1796         return ret;
1797 }
1798
1799 /*******************************************************************
1800  Create and add the NTLMSSP sign/seal auth header and data.
1801  ********************************************************************/
1802
static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
					RPC_HDR *phdr,
					uint32 ss_padding_len,
					prs_struct *outgoing_pdu)
{
	RPC_HDR_AUTH auth_info;
	NTSTATUS status;
	DATA_BLOB auth_blob = data_blob_null;
	/* Stub data plus sign/seal padding already marshalled into the PDU.
	 * NOTE(review): uses RPC_HDR_RESP_LEN on an outgoing request PDU;
	 * presumably request and response header lengths match - confirm. */
	uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;

	if (!cli->auth->a_u.ntlmssp_state) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Init and marshall the auth header. */
	init_rpc_hdr_auth(&auth_info,
			map_pipe_auth_type_to_rpc_auth_type(
				cli->auth->auth_type),
			cli->auth->auth_level,
			ss_padding_len,
			1 /* context id. */);

	if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
		DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
		data_blob_free(&auth_blob);
		return NT_STATUS_NO_MEMORY;
	}

	switch (cli->auth->auth_level) {
		case PIPE_AUTH_LEVEL_PRIVACY:
			/* Data portion is encrypted. Seals the stub data in
			 * place; the signature covering the whole PDU so far
			 * is returned in auth_blob. */
			status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
					data_and_pad_len,
					(unsigned char *)prs_data_p(outgoing_pdu),
					(size_t)prs_offset(outgoing_pdu),
					&auth_blob);
			if (!NT_STATUS_IS_OK(status)) {
				data_blob_free(&auth_blob);
				return status;
			}
			break;

		case PIPE_AUTH_LEVEL_INTEGRITY:
			/* Data is signed. Same regions as above, but the stub
			 * data is left in the clear. */
			status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
					data_and_pad_len,
					(unsigned char *)prs_data_p(outgoing_pdu),
					(size_t)prs_offset(outgoing_pdu),
					&auth_blob);
			if (!NT_STATUS_IS_OK(status)) {
				data_blob_free(&auth_blob);
				return status;
			}
			break;

		default:
			/* Can't happen. */
			smb_panic("bad auth level");
			/* Notreached. */
			return NT_STATUS_INVALID_PARAMETER;
	}

	/* Finally marshall the blob. Exactly NTLMSSP_SIG_SIZE bytes of the
	 * signature are appended after the auth header. */

	if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
		DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
			(unsigned int)NTLMSSP_SIG_SIZE));
		data_blob_free(&auth_blob);
		return NT_STATUS_NO_MEMORY;
	}

	data_blob_free(&auth_blob);
	return NT_STATUS_OK;
}
1879
1880 /*******************************************************************
1881  Create and add the schannel sign/seal auth header and data.
1882  ********************************************************************/
1883
1884 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1885                                         RPC_HDR *phdr,
1886                                         uint32 ss_padding_len,
1887                                         prs_struct *outgoing_pdu)
1888 {
1889         RPC_HDR_AUTH auth_info;
1890         RPC_AUTH_SCHANNEL_CHK verf;
1891         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1892         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1893         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1894
1895         if (!sas) {
1896                 return NT_STATUS_INVALID_PARAMETER;
1897         }
1898
1899         /* Init and marshall the auth header. */
1900         init_rpc_hdr_auth(&auth_info,
1901                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1902                         cli->auth->auth_level,
1903                         ss_padding_len,
1904                         1 /* context id. */);
1905
1906         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1907                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1908                 return NT_STATUS_NO_MEMORY;
1909         }
1910
1911         switch (cli->auth->auth_level) {
1912                 case PIPE_AUTH_LEVEL_PRIVACY:
1913                 case PIPE_AUTH_LEVEL_INTEGRITY:
1914                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1915                                 sas->seq_num));
1916
1917                         schannel_encode(sas,
1918                                         cli->auth->auth_level,
1919                                         SENDER_IS_INITIATOR,
1920                                         &verf,
1921                                         data_p,
1922                                         data_and_pad_len);
1923
1924                         sas->seq_num++;
1925                         break;
1926
1927                 default:
1928                         /* Can't happen. */
1929                         smb_panic("bad auth level");
1930                         /* Notreached. */
1931                         return NT_STATUS_INVALID_PARAMETER;
1932         }
1933
1934         /* Finally marshall the blob. */
1935         smb_io_rpc_auth_schannel_chk("",
1936                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1937                         &verf,
1938                         outgoing_pdu,
1939                         0);
1940
1941         return NT_STATUS_OK;
1942 }
1943
1944 /*******************************************************************
1945  Calculate how much data we're going to send in this packet, also
1946  work out any sign/seal padding length.
1947  ********************************************************************/
1948
static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
					uint32 data_left,
					uint16 *p_frag_len,
					uint16 *p_auth_len,
					uint32 *p_ss_padding)
{
	uint32 data_space, data_len;

#ifdef DEVELOPER
	/* Developer builds randomly halve the amount sent per fragment to
	 * exercise the multi-fragment code paths. */
	if ((data_left > 0) && (sys_random() % 2)) {
		data_left = MAX(data_left/2, 1);
	}
#endif

	switch (cli->auth->auth_level) {
		case PIPE_AUTH_LEVEL_NONE:
		case PIPE_AUTH_LEVEL_CONNECT:
			/* No auth trailer: fragment is headers plus data. */
			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
			data_len = MIN(data_space, data_left);
			*p_ss_padding = 0;
			*p_auth_len = 0;
			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
			return data_len;

		case PIPE_AUTH_LEVEL_INTEGRITY:
		case PIPE_AUTH_LEVEL_PRIVACY:
			/* Treat the same for all authenticated rpc requests. */
			switch(cli->auth->auth_type) {
				case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
				case PIPE_AUTH_TYPE_NTLMSSP:
					*p_auth_len = NTLMSSP_SIG_SIZE;
					break;
				case PIPE_AUTH_TYPE_SCHANNEL:
					*p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
					break;
				default:
					smb_panic("bad auth type");
					break;
			}

			/* Room left for stub data after all headers and the
			 * auth trailer are accounted for. */
			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
						RPC_HDR_AUTH_LEN - *p_auth_len;

			data_len = MIN(data_space, data_left);
			*p_ss_padding = 0;
			/* Sign/seal data must be padded to an 8-byte boundary. */
			if (data_len % 8) {
				*p_ss_padding = 8 - (data_len % 8);
			}
			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +		/* Normal headers. */
					data_len + *p_ss_padding +		/* data plus padding. */
					RPC_HDR_AUTH_LEN + *p_auth_len;		/* Auth header and auth data. */
			return data_len;

		default:
			smb_panic("bad auth level");
			/* Notreached. */
			return 0;
	}
}
2008
2009 /*******************************************************************
2010  External interface.
2011  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2012  Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2013  and deals with signing/sealing details.
2014  ********************************************************************/
2015
/* Per-request state for the async rpc_api_pipe_req_send() machinery. */
struct rpc_api_pipe_req_state {
	struct event_context *ev;	/* event context driving the sub-requests */
	struct rpc_pipe_client *cli;	/* pipe the request is issued on */
	uint8_t op_num;			/* RPC opnum of the call */
	uint32_t call_id;		/* call id shared by all fragments */
	prs_struct *req_data;		/* caller-owned marshalled request data */
	uint32_t req_data_sent;		/* bytes of req_data already sent */
	prs_struct outgoing_frag;	/* scratch buffer for the current fragment */
	prs_struct reply_pdu;		/* assembled response PDU */
};
2026
2027 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2028 {
2029         prs_mem_free(&s->outgoing_frag);
2030         prs_mem_free(&s->reply_pdu);
2031         return 0;
2032 }
2033
2034 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2035 static void rpc_api_pipe_req_done(struct async_req *subreq);
2036 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2037                                   bool *is_last_frag);
2038
/*
 * Start an async RPC request: marshall the first fragment of req_data and
 * either write it out (more fragments follow) or send it and collect the
 * response (single-fragment request). req_data is caller-owned and is read
 * again by later fragments, so it must stay valid until completion.
 */
struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
					struct event_context *ev,
					struct rpc_pipe_client *cli,
					uint8_t op_num,
					prs_struct *req_data)
{
	struct async_req *result, *subreq;
	struct rpc_api_pipe_req_state *state;
	NTSTATUS status;
	bool is_last_frag;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_api_pipe_req_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->op_num = op_num;
	state->req_data = req_data;
	state->req_data_sent = 0;
	state->call_id = get_rpc_call_id();

	/* A fragment must at least hold headers plus the max sign size. */
	if (cli->max_xmit_frag
	    < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
		/* Server is screwed up ! */
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
		      state, MARSHALL)) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}

	/* Frees the prs buffers when the state goes away. */
	talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	if (is_last_frag) {
		/* Only fragment: send it and wait for the RPC response. */
		subreq = rpc_api_pipe_send(state, ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = rpc_api_pipe_req_done;
		subreq->async.priv = result;
	} else {
		/* More to come: just write this fragment, continue in
		 * rpc_api_pipe_req_write_done(). */
		subreq = rpc_write_send(
			state, ev, cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = rpc_api_pipe_req_write_done;
		subreq->async.priv = result;
	}
	return result;

 post_status:
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
2114
/*
 * Marshall the next request fragment into state->outgoing_frag:
 * RPC header, request header, a slice of the request data, sign/seal
 * padding and the auth footer. Sets *is_last_frag when this fragment
 * carries the RPC_FLG_LAST flag.
 */
static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
				  bool *is_last_frag)
{
	RPC_HDR hdr;
	RPC_HDR_REQ hdr_req;
	uint32_t data_sent_thistime;
	uint16_t auth_len;
	uint16_t frag_len;
	uint8_t flags = 0;
	uint32_t ss_padding;
	uint32_t data_left;
	char pad[8] = { 0, };
	NTSTATUS status;

	data_left = prs_offset(state->req_data) - state->req_data_sent;

	/* Work out fragment size, auth trailer size and padding. */
	data_sent_thistime = calculate_data_len_tosend(
		state->cli, data_left, &frag_len, &auth_len, &ss_padding);

	if (state->req_data_sent == 0) {
		flags = RPC_FLG_FIRST;
	}

	if (data_sent_thistime == data_left) {
		flags |= RPC_FLG_LAST;
	}

	/* Reuse the fragment buffer from the start. */
	if (!prs_set_offset(&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create and marshall the header and request header. */
	init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
		     auth_len);

	if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the rpc request RPC_HDR_REQ */
	init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
			 state->op_num);

	if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
				&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy in the data, plus any ss padding. */
	if (!prs_append_some_prs_data(&state->outgoing_frag,
				      state->req_data, state->req_data_sent,
				      data_sent_thistime)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy the sign/seal padding data. */
	if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Generate any auth sign/seal and add the auth footer. */
	switch (state->cli->auth->auth_type) {
	case PIPE_AUTH_TYPE_NONE:
		status = NT_STATUS_OK;
		break;
	case PIPE_AUTH_TYPE_NTLMSSP:
	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
						 &state->outgoing_frag);
		break;
	case PIPE_AUTH_TYPE_SCHANNEL:
		status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
						  &state->outgoing_frag);
		break;
	default:
		status = NT_STATUS_INVALID_PARAMETER;
		break;
	}

	state->req_data_sent += data_sent_thistime;
	*is_last_frag = ((flags & RPC_FLG_LAST) != 0);

	return status;
}
2199
/*
 * Continuation after writing an intermediate fragment: prepare the next
 * fragment and either write it again or, for the last one, send it and
 * wait for the response.
 */
static void rpc_api_pipe_req_write_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_req_state);
	NTSTATUS status;
	bool is_last_frag;

	status = rpc_write_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	if (is_last_frag) {
		/* Last fragment: send and collect the RPC response. */
		subreq = rpc_api_pipe_send(state, state->ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (async_req_nomem(subreq, req)) {
			return;
		}
		subreq->async.fn = rpc_api_pipe_req_done;
		subreq->async.priv = req;
	} else {
		/* More fragments pending: loop back into this function. */
		subreq = rpc_write_send(
			state, state->ev,
			state->cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (async_req_nomem(subreq, req)) {
			return;
		}
		subreq->async.fn = rpc_api_pipe_req_write_done;
		subreq->async.priv = req;
	}
}
2244
2245 static void rpc_api_pipe_req_done(struct async_req *subreq)
2246 {
2247         struct async_req *req = talloc_get_type_abort(
2248                 subreq->async.priv, struct async_req);
2249         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2250                 req->private_data, struct rpc_api_pipe_req_state);
2251         NTSTATUS status;
2252
2253         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2254         TALLOC_FREE(subreq);
2255         if (!NT_STATUS_IS_OK(status)) {
2256                 async_req_error(req, status);
2257                 return;
2258         }
2259         async_req_done(req);
2260 }
2261
NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
			       prs_struct *reply_pdu)
{
	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_req_state);
	NTSTATUS status;

	if (async_req_is_error(req, &status)) {
		/*
		 * We always have to initialize the reply pdu, even if there
		 * is none. The rpccli_* caller routines expect this.
		 */
		prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
		return status;
	}

	/* Hand the reply pdu over, rooted in the caller's mem_ctx. */
	*reply_pdu = state->reply_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->reply_pdu from being freed in
	 * rpc_api_pipe_req_state_destructor()
	 */
	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
2289
2290 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2291                         uint8 op_num,
2292                         prs_struct *in_data,
2293                         prs_struct *out_data)
2294 {
2295         TALLOC_CTX *frame = talloc_stackframe();
2296         struct event_context *ev;
2297         struct async_req *req;
2298         NTSTATUS status = NT_STATUS_NO_MEMORY;
2299
2300         ev = event_context_init(frame);
2301         if (ev == NULL) {
2302                 goto fail;
2303         }
2304
2305         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2306         if (req == NULL) {
2307                 goto fail;
2308         }
2309
2310         while (req->state < ASYNC_REQ_DONE) {
2311                 event_loop_once(ev);
2312         }
2313
2314         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2315  fail:
2316         TALLOC_FREE(frame);
2317         return status;
2318 }
2319
#if 0
/*
 * NOTE(review): dead code, compiled out since it uses the legacy
 * cli_api_pipe() SMB-trans interface. Kept for reference only; consider
 * removing it entirely.
 */
/****************************************************************************
 Set the handle state.
****************************************************************************/

static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
                                   const char *pipe_name, uint16 device_state)
{
	bool state_set = False;
	char param[2];
	uint16 setup[2]; /* only need 2 uint16 setup parameters */
	char *rparam = NULL;
	char *rdata = NULL;
	uint32 rparam_len, rdata_len;

	if (pipe_name == NULL)
		return False;

	DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
		 cli->fnum, pipe_name, device_state));

	/* create parameters: device state */
	SSVAL(param, 0, device_state);

	/* create setup parameters. */
	setup[0] = 0x0001; 
	setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */

	/* send the data on \PIPE\ */
	if (cli_api_pipe(cli->cli, "\\PIPE\\",
	            setup, 2, 0,                /* setup, length, max */
	            param, 2, 0,                /* param, length, max */
	            NULL, 0, 1024,              /* data, length, max */
	            &rparam, &rparam_len,        /* return param, length */
	            &rdata, &rdata_len))         /* return data, length */
	{
		DEBUG(5, ("Set Handle state: return OK\n"));
		state_set = True;
	}

	SAFE_FREE(rparam);
	SAFE_FREE(rdata);

	return state_set;
}
#endif
2366
2367 /****************************************************************************
2368  Check the rpc bind acknowledge response.
2369 ****************************************************************************/
2370
2371 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2372 {
2373         if ( hdr_ba->addr.len == 0) {
2374                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2375         }
2376
2377         /* check the transfer syntax */
2378         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2379              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2380                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2381                 return False;
2382         }
2383
2384         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2385                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2386                           hdr_ba->res.num_results, hdr_ba->res.reason));
2387         }
2388
2389         DEBUG(5,("check_bind_response: accepted!\n"));
2390         return True;
2391 }
2392
2393 /*******************************************************************
2394  Creates a DCE/RPC bind authentication response.
2395  This is the packet that is sent back to the server once we
2396  have received a BIND-ACK, to finish the third leg of
2397  the authentication handshake.
2398  ********************************************************************/
2399
static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
				uint32 rpc_call_id,
				enum pipe_auth_type auth_type,
				enum pipe_auth_level auth_level,
				DATA_BLOB *pauth_blob,
				prs_struct *rpc_out)
{
	RPC_HDR hdr;
	RPC_HDR_AUTH hdr_auth;
	uint32 pad = 0;

	/* Create the request RPC_HDR */
	init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
		     RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
		     pauth_blob->length );

	/* Marshall it. */
	/* NOTE: marshalling failures are mapped to NT_STATUS_NO_MEMORY
	 * throughout this function, following the file's convention. */
	if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
		I'm puzzled about this - seems to violate the DCE RPC auth rules,
		about padding - shouldn't this pad to length 8 ? JRA.
	*/

	/* 4 bytes padding. */
	if (!prs_uint32("pad", rpc_out, 0, &pad)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the request RPC_HDR_AUTHA */
	init_rpc_hdr_auth(&hdr_auth,
			map_pipe_auth_type_to_rpc_auth_type(auth_type),
			auth_level, 0, 1);

	if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Append the auth data to the outgoing buffer.
	 */

	if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	return NT_STATUS_OK;
}
2454
2455 /*******************************************************************
2456  Creates a DCE/RPC bind alter context authentication request which
2457  may contain a spnego auth blobl
2458  ********************************************************************/
2459
static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
					const RPC_IFACE *abstract,
					const RPC_IFACE *transfer,
					enum pipe_auth_level auth_level,
					const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
					prs_struct *rpc_out)
{
	RPC_HDR_AUTH hdr_auth;
	prs_struct auth_info;
	NTSTATUS ret = NT_STATUS_OK;

	ZERO_STRUCT(hdr_auth);
	if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
		return NT_STATUS_NO_MEMORY;

	/* We may change the pad length before marshalling. */
	init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);

	/* An empty auth blob is legal here; only copy if there is data. */
	if (pauth_blob->length) {
		if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
			prs_mem_free(&auth_info);
			return NT_STATUS_NO_MEMORY;
		}
	}

	/* Shared marshalling path for bind and alter-context PDUs. */
	ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
						rpc_out, 
						rpc_call_id,
						abstract,
						transfer,
						&hdr_auth,
						&auth_info);
	prs_mem_free(&auth_info);
	return ret;
}
2495
2496 /****************************************************************************
2497  Do an rpc bind.
2498 ****************************************************************************/
2499
/* Per-request state for the (possibly multi-leg) async bind. */
struct rpc_pipe_bind_state {
	struct event_context *ev;	/* event loop driving the bind */
	struct rpc_pipe_client *cli;	/* pipe being bound */
	prs_struct rpc_out;		/* marshalled outgoing PDU, reused per leg */
	uint32_t rpc_call_id;		/* call id shared by all legs of this bind */
};
2506
/* Talloc destructor: release the marshalling buffer held in state. */
static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
{
	prs_mem_free(&state->rpc_out);
	return 0;
}
2512
/*
 * Forward declarations for the multi-leg authenticated bind state
 * machine (BIND -> BIND-ACK -> AUTH3 or ALTER_CONTEXT legs).
 */
static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
					   struct rpc_pipe_bind_state *state,
					   struct rpc_hdr_info *phdr,
					   prs_struct *reply_pdu);
static void rpc_bind_auth3_write_done(struct async_req *subreq);
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
						    struct rpc_pipe_bind_state *state,
						    struct rpc_hdr_info *phdr,
						    prs_struct *reply_pdu);
static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2524
/*
 * Kick off an async bind: marshall the BIND PDU and send it, expecting a
 * BIND-ACK. Further legs (AUTH3 / alter context) are driven from
 * rpc_pipe_bind_step_one_done(). Takes ownership of *auth.
 */
struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
				     struct event_context *ev,
				     struct rpc_pipe_client *cli,
				     struct cli_pipe_auth_data *auth)
{
	struct async_req *result, *subreq;
	struct rpc_pipe_bind_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_pipe_bind_state)) {
		return NULL;
	}

	DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
		rpccli_pipe_txt(debug_ctx(), cli),
		(unsigned int)auth->auth_type,
		(unsigned int)auth->auth_level ));

	state->ev = ev;
	state->cli = cli;
	state->rpc_call_id = get_rpc_call_id();

	prs_init_empty(&state->rpc_out, state, MARSHALL);
	talloc_set_destructor(state, rpc_pipe_bind_state_destructor);

	/* The pipe client takes ownership of the auth data. */
	cli->auth = talloc_move(cli, &auth);

	/* Marshall the outgoing data. */
	status = create_rpc_bind_req(cli, &state->rpc_out,
				     state->rpc_call_id,
				     &cli->abstract_syntax,
				     &cli->transfer_syntax,
				     cli->auth->auth_type,
				     cli->auth->auth_level);

	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
				   RPC_BINDACK);
	if (subreq == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}
	subreq->async.fn = rpc_pipe_bind_step_one_done;
	subreq->async.priv = result;
	return result;

 post_status:
	/* Deliver the error through the event loop, async convention. */
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
2582
/*
 * BIND-ACK arrived: validate it, record the negotiated fragment sizes and,
 * depending on the auth type, either finish or launch the next auth leg.
 */
static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_ba_info hdr_ba;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  nt_errstr(status)));
		async_req_error(req, status);
		return;
	}

	/* Unmarshall the RPC header */
	if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
			  "RPC_HDR_BA.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
		DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Remember the fragment sizes the server negotiated. */
	state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
	state->cli->max_recv_frag = hdr_ba.bba.max_rsize;

	/*
	 * For authenticated binds we may need to do 3 or 4 leg binds.
	 */

	switch(state->cli->auth->auth_type) {

	case PIPE_AUTH_TYPE_NONE:
	case PIPE_AUTH_TYPE_SCHANNEL:
		/* Bind complete. */
		async_req_done(req);
		break;

	case PIPE_AUTH_TYPE_NTLMSSP:
		/* Need to send AUTH3 packet - no reply. */
		status = rpc_finish_auth3_bind_send(req, state, &hdr,
						    &reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_error(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		/* Need to send alter context request and reply. */
		status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
							     &reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_error(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_KRB5:
		/* Fall through - KRB5 bind not implemented here. */

	default:
		DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
			 (unsigned int)state->cli->auth->auth_type));
		async_req_error(req, NT_STATUS_INTERNAL_ERROR);
	}
}
2666
/*
 * Third leg of a plain NTLMSSP bind: pull the server's challenge out of
 * the BIND-ACK auth trailer, feed it to NTLMSSP, and send the resulting
 * AUTH3 PDU (which gets no reply).
 */
static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
					   struct rpc_pipe_bind_state *state,
					   struct rpc_hdr_info *phdr,
					   prs_struct *reply_pdu)
{
	DATA_BLOB server_response = data_blob_null;
	DATA_BLOB client_reply = data_blob_null;
	struct rpc_hdr_auth_info hdr_auth;
	struct async_req *subreq;
	NTSTATUS status;

	/* Sanity-check the auth trailer lengths from the wire. */
	if ((phdr->auth_len == 0)
	    || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Seek to the auth header at the end of the fragment. */
	if (!prs_set_offset(
		    reply_pdu,
		    phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* TODO - check auth_type/auth_level match. */

	server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
	prs_copy_data_out((char *)server_response.data, reply_pdu,
			  phdr->auth_len);

	status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
				server_response, &client_reply);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
			  "blob failed: %s.\n", nt_errstr(status)));
		return status;
	}

	/*
	 * NOTE(review): rpc_out is re-initialized on talloc_tos() here,
	 * while the spnego path uses "state" as the context — confirm the
	 * tos frame outlives the async write started below.
	 */
	prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);

	status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
				       state->cli->auth->auth_type,
				       state->cli->auth->auth_level,
				       &client_reply, &state->rpc_out);
	data_blob_free(&client_reply);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* AUTH3 has no response PDU - a plain write is sufficient. */
	subreq = rpc_write_send(state, state->ev, state->cli->transport,
				(uint8_t *)prs_data_p(&state->rpc_out),
				prs_offset(&state->rpc_out));
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	subreq->async.fn = rpc_bind_auth3_write_done;
	subreq->async.priv = req;
	return NT_STATUS_OK;
}
2730
2731 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2732 {
2733         struct async_req *req = talloc_get_type_abort(
2734                 subreq->async.priv, struct async_req);
2735         NTSTATUS status;
2736
2737         status = rpc_write_recv(subreq);
2738         TALLOC_FREE(subreq);
2739         if (!NT_STATUS_IS_OK(status)) {
2740                 async_req_error(req, status);
2741                 return;
2742         }
2743         async_req_done(req);
2744 }
2745
/*
 * Third leg of a SPNEGO-wrapped NTLMSSP bind: unwrap the server's SPNEGO
 * challenge from the BIND-ACK, run it through NTLMSSP, re-wrap the reply
 * in SPNEGO and send it as an ALTER_CONTEXT request (which does get a
 * response, handled in rpc_bind_ntlmssp_api_done).
 */
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
						    struct rpc_pipe_bind_state *state,
						    struct rpc_hdr_info *phdr,
						    prs_struct *reply_pdu)
{
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB server_ntlm_response = data_blob_null;
	DATA_BLOB client_reply = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	RPC_HDR_AUTH hdr_auth;
	struct async_req *subreq;
	NTSTATUS status;

	/* Sanity-check the auth trailer lengths from the wire. */
	if ((phdr->auth_len == 0)
	    || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Process the returned NTLMSSP blob first. */
	if (!prs_set_offset(
		    reply_pdu,
		    phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	server_spnego_response = data_blob(NULL, phdr->auth_len);
	prs_copy_data_out((char *)server_spnego_response.data,
			  reply_pdu, phdr->auth_len);

	/*
	 * The server might give us back two challenges - tmp_blob is for the
	 * second.
	 */
	if (!spnego_parse_challenge(server_spnego_response,
				    &server_ntlm_response, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&server_ntlm_response);
		data_blob_free(&tmp_blob);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* We're finished with the server spnego response and the tmp_blob. */
	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
				server_ntlm_response, &client_reply);

	/* Finished with the server_ntlm response */
	data_blob_free(&server_ntlm_response);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
			  "using server blob failed.\n"));
		data_blob_free(&client_reply);
		return status;
	}

	/* SPNEGO wrap the client reply. */
	tmp_blob = spnego_gen_auth(client_reply);
	data_blob_free(&client_reply);
	client_reply = tmp_blob;
	tmp_blob = data_blob_null;

	/* Now prepare the alter context pdu. */
	prs_init_empty(&state->rpc_out, state, MARSHALL);

	status = create_rpc_alter_context(state->rpc_call_id,
					  &state->cli->abstract_syntax,
					  &state->cli->transfer_syntax,
					  state->cli->auth->auth_level,
					  &client_reply,
					  &state->rpc_out);
	data_blob_free(&client_reply);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Alter context expects a response PDU. */
	subreq = rpc_api_pipe_send(state, state->ev, state->cli,
				   &state->rpc_out, RPC_ALTCONTRESP);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	subreq->async.fn = rpc_bind_ntlmssp_api_done;
	subreq->async.priv = req;
	return NT_STATUS_OK;
}
2838
/*
 * Final leg of the SPNEGO/NTLMSSP bind: the alter-context response has
 * arrived; extract its SPNEGO auth trailer and verify the server accepted
 * our authentication.
 */
static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_auth_info hdr_auth;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	/* Get the auth blob from the reply. */
	if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
			  "unmarshall RPC_HDR.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Seek to the auth trailer at the end of the fragment. */
	if (!prs_set_offset(
		    &reply_pdu,
		    hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	server_spnego_response = data_blob(NULL, hdr.auth_len);
	prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
			  hdr.auth_len);

	/* Check we got a valid auth response. */
	if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
					OID_NTLMSSP, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&tmp_blob);
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
		 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
	async_req_done(req);
}
2899
/* A bind has no result beyond its status code. */
NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
{
	return async_req_simple_recv(req);
}
2904
2905 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2906                        struct cli_pipe_auth_data *auth)
2907 {
2908         TALLOC_CTX *frame = talloc_stackframe();
2909         struct event_context *ev;
2910         struct async_req *req;
2911         NTSTATUS status = NT_STATUS_NO_MEMORY;
2912
2913         ev = event_context_init(frame);
2914         if (ev == NULL) {
2915                 goto fail;
2916         }
2917
2918         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2919         if (req == NULL) {
2920                 goto fail;
2921         }
2922
2923         while (req->state < ASYNC_REQ_DONE) {
2924                 event_loop_once(ev);
2925         }
2926
2927         status = rpc_pipe_bind_recv(req);
2928  fail:
2929         TALLOC_FREE(frame);
2930         return status;
2931 }
2932
2933 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2934                                 unsigned int timeout)
2935 {
2936         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2937
2938         if (cli == NULL) {
2939                 return 0;
2940         }
2941         return cli_set_timeout(cli, timeout);
2942 }
2943
2944 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2945 {
2946         struct cli_state *cli;
2947
2948         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2949             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2950                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2951                 return true;
2952         }
2953
2954         cli = rpc_pipe_np_smb_conn(rpc_cli);
2955         if (cli == NULL) {
2956                 return false;
2957         }
2958         E_md4hash(cli->pwd.password, nt_hash);
2959         return true;
2960 }
2961
2962 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2963                                struct cli_pipe_auth_data **presult)
2964 {
2965         struct cli_pipe_auth_data *result;
2966
2967         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2968         if (result == NULL) {
2969                 return NT_STATUS_NO_MEMORY;
2970         }
2971
2972         result->auth_type = PIPE_AUTH_TYPE_NONE;
2973         result->auth_level = PIPE_AUTH_LEVEL_NONE;
2974
2975         result->user_name = talloc_strdup(result, "");
2976         result->domain = talloc_strdup(result, "");
2977         if ((result->user_name == NULL) || (result->domain == NULL)) {
2978                 TALLOC_FREE(result);
2979                 return NT_STATUS_NO_MEMORY;
2980         }
2981
2982         *presult = result;
2983         return NT_STATUS_OK;
2984 }
2985
/* Talloc destructor: tear down the NTLMSSP state held by the auth data. */
static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
{
	ntlmssp_end(&auth->a_u.ntlmssp_state);
	return 0;
}
2991
2992 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
2993                                   enum pipe_auth_type auth_type,
2994                                   enum pipe_auth_level auth_level,
2995                                   const char *domain,
2996                                   const char *username,
2997                                   const char *password,
2998                                   struct cli_pipe_auth_data **presult)
2999 {
3000         struct cli_pipe_auth_data *result;
3001         NTSTATUS status;
3002
3003         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3004         if (result == NULL) {
3005                 return NT_STATUS_NO_MEMORY;
3006         }
3007
3008         result->auth_type = auth_type;
3009         result->auth_level = auth_level;
3010
3011         result->user_name = talloc_strdup(result, username);
3012         result->domain = talloc_strdup(result, domain);
3013         if ((result->user_name == NULL) || (result->domain == NULL)) {
3014                 status = NT_STATUS_NO_MEMORY;
3015                 goto fail;
3016         }
3017
3018         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3019         if (!NT_STATUS_IS_OK(status)) {
3020                 goto fail;
3021         }
3022
3023         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3024
3025         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3026         if (!NT_STATUS_IS_OK(status)) {
3027                 goto fail;
3028         }
3029
3030         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3031         if (!NT_STATUS_IS_OK(status)) {
3032                 goto fail;
3033         }
3034
3035         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3036         if (!NT_STATUS_IS_OK(status)) {
3037                 goto fail;
3038         }
3039
3040         /*
3041          * Turn off sign+seal to allow selected auth level to turn it back on.
3042          */
3043         result->a_u.ntlmssp_state->neg_flags &=
3044                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3045
3046         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3047                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3048         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3049                 result->a_u.ntlmssp_state->neg_flags
3050                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3051         }
3052
3053         *presult = result;
3054         return NT_STATUS_OK;
3055
3056  fail:
3057         TALLOC_FREE(result);
3058         return status;
3059 }
3060
3061 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3062                                    enum pipe_auth_level auth_level,
3063                                    const uint8_t sess_key[16],
3064                                    struct cli_pipe_auth_data **presult)
3065 {
3066         struct cli_pipe_auth_data *result;
3067
3068         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3069         if (result == NULL) {
3070                 return NT_STATUS_NO_MEMORY;
3071         }
3072
3073         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3074         result->auth_level = auth_level;
3075
3076         result->user_name = talloc_strdup(result, "");
3077         result->domain = talloc_strdup(result, domain);
3078         if ((result->user_name == NULL) || (result->domain == NULL)) {
3079                 goto fail;
3080         }
3081
3082         result->a_u.schannel_auth = talloc(result,
3083                                            struct schannel_auth_struct);
3084         if (result->a_u.schannel_auth == NULL) {
3085                 goto fail;
3086         }
3087
3088         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3089                sizeof(result->a_u.schannel_auth->sess_key));
3090         result->a_u.schannel_auth->seq_num = 0;
3091
3092         *presult = result;
3093         return NT_STATUS_OK;
3094
3095  fail:
3096         TALLOC_FREE(result);
3097         return NT_STATUS_NO_MEMORY;
3098 }
3099
#ifdef HAVE_KRB5
/* Talloc destructor: release the Kerberos session key blob when the
 * kerberos auth struct is freed. */
static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
{
	data_blob_free(&auth->session_key);
	return 0;
}
#endif
3107
/*
 * Build auth data for a Kerberos (KRB5) bind against service_princ.
 *
 * If both username and password are given, a kinit is performed first to
 * obtain fresh credentials; otherwise the existing credential cache is
 * used.  Returns NT_STATUS_NOT_SUPPORTED when built without Kerberos.
 */
NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
				   enum pipe_auth_level auth_level,
				   const char *service_princ,
				   const char *username,
				   const char *password,
				   struct cli_pipe_auth_data **presult)
{
#ifdef HAVE_KRB5
	struct cli_pipe_auth_data *result;

	/* Acquire a fresh TGT when explicit credentials were passed in. */
	if ((username != NULL) && (password != NULL)) {
		int ret = kerberos_kinit_password(username, password, 0, NULL);
		if (ret != 0) {
			return NT_STATUS_ACCESS_DENIED;
		}
	}

	result = talloc(mem_ctx, struct cli_pipe_auth_data);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->auth_type = PIPE_AUTH_TYPE_KRB5;
	result->auth_level = auth_level;

	/*
	 * Username / domain need fixing!
	 */
	result->user_name = talloc_strdup(result, "");
	result->domain = talloc_strdup(result, "");
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		goto fail;
	}

	result->a_u.kerberos_auth = TALLOC_ZERO_P(
		result, struct kerberos_auth_struct);
	if (result->a_u.kerberos_auth == NULL) {
		goto fail;
	}
	/* Free the session key blob together with the auth struct. */
	talloc_set_destructor(result->a_u.kerberos_auth,
			      cli_auth_kerberos_data_destructor);

	result->a_u.kerberos_auth->service_principal = talloc_strdup(
		result, service_princ);
	if (result->a_u.kerberos_auth->service_principal == NULL) {
		goto fail;
	}

	*presult = result;
	return NT_STATUS_OK;

 fail:
	/* Only allocation failures reach here, hence NO_MEMORY. */
	TALLOC_FREE(result);
	return NT_STATUS_NO_MEMORY;
#else
	return NT_STATUS_NOT_SUPPORTED;
#endif
}
3166
3167 /**
3168  * Create an rpc pipe client struct, connecting to a tcp port.
3169  */
3170 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3171                                        uint16_t port,
3172                                        const struct ndr_syntax_id *abstract_syntax,
3173                                        struct rpc_pipe_client **presult)
3174 {
3175         struct rpc_pipe_client *result;
3176         struct sockaddr_storage addr;
3177         NTSTATUS status;
3178         int fd;
3179
3180         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3181         if (result == NULL) {
3182                 return NT_STATUS_NO_MEMORY;
3183         }
3184
3185         result->abstract_syntax = *abstract_syntax;
3186         result->transfer_syntax = ndr_transfer_syntax;
3187         result->dispatch = cli_do_rpc_ndr;
3188
3189         result->desthost = talloc_strdup(result, host);
3190         result->srv_name_slash = talloc_asprintf_strupper_m(
3191                 result, "\\\\%s", result->desthost);
3192         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3193                 status = NT_STATUS_NO_MEMORY;
3194                 goto fail;
3195         }
3196
3197         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3198         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3199
3200         if (!resolve_name(host, &addr, 0)) {
3201                 status = NT_STATUS_NOT_FOUND;
3202                 goto fail;
3203         }
3204
3205         status = open_socket_out(&addr, port, 60, &fd);
3206         if (!NT_STATUS_IS_OK(status)) {
3207                 goto fail;
3208         }
3209         set_socket_options(fd, lp_socket_options());
3210
3211         status = rpc_transport_sock_init(result, fd, &result->transport);
3212         if (!NT_STATUS_IS_OK(status)) {
3213                 close(fd);
3214                 goto fail;
3215         }
3216
3217         *presult = result;
3218         return NT_STATUS_OK;
3219
3220  fail:
3221         TALLOC_FREE(result);
3222         return status;
3223 }
3224
3225 /**
3226  * Determine the tcp port on which a dcerpc interface is listening
3227  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3228  * target host.
3229  */
/* Ask the remote endpoint mapper (tcp port 135) which tcp port the
 * interface identified by abstract_syntax listens on, returning it in
 * *pport.  All intermediate allocations live on a stackframe that is
 * freed before returning. */
static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
				      const struct ndr_syntax_id *abstract_syntax,
				      uint16_t *pport)
{
	NTSTATUS status;
	struct rpc_pipe_client *epm_pipe = NULL;
	struct cli_pipe_auth_data *auth = NULL;
	struct dcerpc_binding *map_binding = NULL;
	struct dcerpc_binding *res_binding = NULL;
	struct epm_twr_t *map_tower = NULL;
	struct epm_twr_t *res_towers = NULL;
	struct policy_handle *entry_handle = NULL;
	uint32_t num_towers = 0;
	uint32_t max_towers = 1;
	struct epm_twr_p_t towers;
	TALLOC_CTX *tmp_ctx = talloc_stackframe();

	if (pport == NULL) {
		status = NT_STATUS_INVALID_PARAMETER;
		goto done;
	}

	/* open the connection to the endpoint mapper */
	status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
					&ndr_table_epmapper.syntax_id,
					&epm_pipe);

	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* The endpoint mapper is queried via an anonymous bind. */
	status = rpccli_anon_bind_data(tmp_ctx, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	status = rpc_pipe_bind(epm_pipe, auth);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* create tower for asking the epmapper */

	map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
	if (map_binding == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	map_binding->transport = NCACN_IP_TCP;
	map_binding->object = *abstract_syntax;
	map_binding->host = host; /* needed? */
	map_binding->endpoint = "0"; /* correct? needed? */

	map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
	if (map_tower == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	/* Encode the binding as a protocol tower for the epm_Map request. */
	status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
					    &(map_tower->tower));
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* allocate further parameters for the epm_Map call */

	res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
	if (res_towers == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}
	towers.twr = res_towers;

	entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
	if (entry_handle == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	/* ask the endpoint mapper for the port */

	status = rpccli_epm_Map(epm_pipe,
				tmp_ctx,
				CONST_DISCARD(struct GUID *,
					      &(abstract_syntax->uuid)),
				map_tower,
				entry_handle,
				max_towers,
				&num_towers,
				&towers);

	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* We only requested one tower, so anything else is unexpected. */
	if (num_towers != 1) {
		status = NT_STATUS_UNSUCCESSFUL;
		goto done;
	}

	/* extract the port from the answer */

	status = dcerpc_binding_from_tower(tmp_ctx,
					   &(towers.twr->tower),
					   &res_binding);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* are further checks here necessary? */
	if (res_binding->transport != NCACN_IP_TCP) {
		status = NT_STATUS_UNSUCCESSFUL;
		goto done;
	}

	/* NOTE(review): atoi() silently yields 0 on a malformed endpoint
	 * string — presumably the epmapper always returns a numeric port;
	 * verify if this ever needs hardening. */
	*pport = (uint16_t)atoi(res_binding->endpoint);

done:
	TALLOC_FREE(tmp_ctx);
	return status;
}
3353
3354 /**
3355  * Create a rpc pipe client struct, connecting to a host via tcp.
3356  * The port is determined by asking the endpoint mapper on the given
3357  * host.
3358  */
3359 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3360                            const struct ndr_syntax_id *abstract_syntax,
3361                            struct rpc_pipe_client **presult)
3362 {
3363         NTSTATUS status;
3364         uint16_t port = 0;
3365
3366         *presult = NULL;
3367
3368         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3369         if (!NT_STATUS_IS_OK(status)) {
3370                 goto done;
3371         }
3372
3373         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3374                                         abstract_syntax, presult);
3375
3376 done:
3377         return status;
3378 }
3379
3380 /********************************************************************
3381  Create a rpc pipe client struct, connecting to a unix domain socket
3382  ********************************************************************/
3383 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3384                                const struct ndr_syntax_id *abstract_syntax,
3385                                struct rpc_pipe_client **presult)
3386 {
3387         struct rpc_pipe_client *result;
3388         struct sockaddr_un addr;
3389         NTSTATUS status;
3390         int fd;
3391
3392         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3393         if (result == NULL) {
3394                 return NT_STATUS_NO_MEMORY;
3395         }
3396
3397         result->abstract_syntax = *abstract_syntax;
3398         result->transfer_syntax = ndr_transfer_syntax;
3399         result->dispatch = cli_do_rpc_ndr;
3400
3401         result->desthost = talloc_get_myname(result);
3402         result->srv_name_slash = talloc_asprintf_strupper_m(
3403                 result, "\\\\%s", result->desthost);
3404         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3405                 status = NT_STATUS_NO_MEMORY;
3406                 goto fail;
3407         }
3408
3409         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3410         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3411
3412         fd = socket(AF_UNIX, SOCK_STREAM, 0);
3413         if (fd == -1) {
3414                 status = map_nt_error_from_unix(errno);
3415                 goto fail;
3416         }
3417
3418         ZERO_STRUCT(addr);
3419         addr.sun_family = AF_UNIX;
3420         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3421
3422         if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3423                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3424                           strerror(errno)));
3425                 close(fd);
3426                 return map_nt_error_from_unix(errno);
3427         }
3428
3429         status = rpc_transport_sock_init(result, fd, &result->transport);
3430         if (!NT_STATUS_IS_OK(status)) {
3431                 close(fd);
3432                 goto fail;
3433         }
3434
3435         *presult = result;
3436         return NT_STATUS_OK;
3437
3438  fail:
3439         TALLOC_FREE(result);
3440         return status;
3441 }
3442
/* Talloc destructor: unlink this pipe from the owning SMB connection's
 * pipe_list, if that connection is still alive, when the pipe is freed. */
static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
{
	struct cli_state *cli;

	cli = rpc_pipe_np_smb_conn(p);
	if (cli != NULL) {
		DLIST_REMOVE(cli->pipe_list, p);
	}
	return 0;
}
3453
3454 /****************************************************************************
3455  Open a named pipe over SMB to a remote server.
3456  *
3457  * CAVEAT CALLER OF THIS FUNCTION:
3458  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3459  *    so be sure that this function is called AFTER any structure (vs pointer)
3460  *    assignment of the cli.  In particular, libsmbclient does structure
3461  *    assignments of cli, which invalidates the data in the returned
3462  *    rpc_pipe_client if this function is called before the structure assignment
3463  *    of cli.
3464  * 
3465  ****************************************************************************/
3466
3467 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3468                                  const struct ndr_syntax_id *abstract_syntax,
3469                                  struct rpc_pipe_client **presult)
3470 {
3471         struct rpc_pipe_client *result;
3472         NTSTATUS status;
3473
3474         /* sanity check to protect against crashes */
3475
3476         if ( !cli ) {
3477                 return NT_STATUS_INVALID_HANDLE;
3478         }
3479
3480         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3481         if (result == NULL) {
3482                 return NT_STATUS_NO_MEMORY;
3483         }
3484
3485         result->abstract_syntax = *abstract_syntax;
3486         result->transfer_syntax = ndr_transfer_syntax;
3487         result->dispatch = cli_do_rpc_ndr;
3488         result->desthost = talloc_strdup(result, cli->desthost);
3489         result->srv_name_slash = talloc_asprintf_strupper_m(
3490                 result, "\\\\%s", result->desthost);
3491
3492         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3493         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3494
3495         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3496                 TALLOC_FREE(result);
3497                 return NT_STATUS_NO_MEMORY;
3498         }
3499
3500         status = rpc_transport_np_init(result, cli, abstract_syntax,
3501                                        &result->transport);
3502         if (!NT_STATUS_IS_OK(status)) {
3503                 TALLOC_FREE(result);
3504                 return status;
3505         }
3506
3507         DLIST_ADD(cli->pipe_list, result);
3508         talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3509
3510         *presult = result;
3511         return NT_STATUS_OK;
3512 }
3513
3514 /****************************************************************************
3515  Open a pipe to a remote server.
3516  ****************************************************************************/
3517
3518 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3519                                   const struct ndr_syntax_id *interface,
3520                                   struct rpc_pipe_client **presult)
3521 {
3522         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3523                 /*
3524                  * We should have a better way to figure out this drsuapi
3525                  * speciality...
3526                  */
3527                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3528                                          presult);
3529         }
3530
3531         return rpc_pipe_open_np(cli, interface, presult);
3532 }
3533
3534 /****************************************************************************
3535  Open a named pipe to an SMB server and bind anonymously.
3536  ****************************************************************************/
3537
3538 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3539                                   const struct ndr_syntax_id *interface,
3540                                   struct rpc_pipe_client **presult)
3541 {
3542         struct rpc_pipe_client *result;
3543         struct cli_pipe_auth_data *auth;
3544         NTSTATUS status;
3545
3546         status = cli_rpc_pipe_open(cli, interface, &result);
3547         if (!NT_STATUS_IS_OK(status)) {
3548                 return status;
3549         }
3550
3551         status = rpccli_anon_bind_data(result, &auth);
3552         if (!NT_STATUS_IS_OK(status)) {
3553                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3554                           nt_errstr(status)));
3555                 TALLOC_FREE(result);
3556                 return status;
3557         }
3558
3559         /*
3560          * This is a bit of an abstraction violation due to the fact that an
3561          * anonymous bind on an authenticated SMB inherits the user/domain
3562          * from the enclosing SMB creds
3563          */
3564
3565         TALLOC_FREE(auth->user_name);
3566         TALLOC_FREE(auth->domain);
3567
3568         auth->user_name = talloc_strdup(auth, cli->user_name);
3569         auth->domain = talloc_strdup(auth, cli->domain);
3570         auth->user_session_key = data_blob_talloc(auth,
3571                 cli->user_session_key.data,
3572                 cli->user_session_key.length);
3573
3574         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3575                 TALLOC_FREE(result);
3576                 return NT_STATUS_NO_MEMORY;
3577         }
3578
3579         status = rpc_pipe_bind(result, auth);
3580         if (!NT_STATUS_IS_OK(status)) {
3581                 int lvl = 0;
3582                 if (ndr_syntax_id_equal(interface,
3583                                         &ndr_table_dssetup.syntax_id)) {
3584                         /* non AD domains just don't have this pipe, avoid
3585                          * level 0 statement in that case - gd */
3586                         lvl = 3;
3587                 }
3588                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3589                             "%s failed with error %s\n",
3590                             cli_get_pipe_name_from_iface(debug_ctx(),
3591                                                          interface),
3592                             nt_errstr(status) ));
3593                 TALLOC_FREE(result);
3594                 return status;
3595         }
3596
3597         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3598                   "%s and bound anonymously.\n",
3599                   cli_get_pipe_name_from_iface(debug_ctx(), interface),
3600                   cli->desthost ));
3601
3602         *presult = result;
3603         return NT_STATUS_OK;
3604 }
3605
3606 /****************************************************************************
3607  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3608  ****************************************************************************/
3609
3610 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3611                                                    const struct ndr_syntax_id *interface,
3612                                                    enum pipe_auth_type auth_type,
3613                                                    enum pipe_auth_level auth_level,
3614                                                    const char *domain,
3615                                                    const char *username,
3616                                                    const char *password,
3617                                                    struct rpc_pipe_client **presult)
3618 {
3619         struct rpc_pipe_client *result;
3620         struct cli_pipe_auth_data *auth;
3621         NTSTATUS status;
3622
3623         status = cli_rpc_pipe_open(cli, interface, &result);
3624         if (!NT_STATUS_IS_OK(status)) {
3625                 return status;
3626         }
3627
3628         status = rpccli_ntlmssp_bind_data(
3629                 result, auth_type, auth_level, domain, username,
3630                 cli->pwd.null_pwd ? NULL : password, &auth);
3631         if (!NT_STATUS_IS_OK(status)) {
3632                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3633                           nt_errstr(status)));
3634                 goto err;
3635         }
3636
3637         status = rpc_pipe_bind(result, auth);
3638         if (!NT_STATUS_IS_OK(status)) {
3639                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3640                         nt_errstr(status) ));
3641                 goto err;
3642         }
3643
3644         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3645                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3646                   cli_get_pipe_name_from_iface(debug_ctx(), interface),
3647                   cli->desthost, domain, username ));
3648
3649         *presult = result;
3650         return NT_STATUS_OK;
3651
3652   err:
3653
3654         TALLOC_FREE(result);
3655         return status;
3656 }
3657
3658 /****************************************************************************
3659  External interface.
3660  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3661  ****************************************************************************/
3662
NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
				   const struct ndr_syntax_id *interface,
				   enum pipe_auth_level auth_level,
				   const char *domain,
				   const char *username,
				   const char *password,
				   struct rpc_pipe_client **presult)
{
	/* Thin wrapper: plain NTLMSSP; all work happens in the shared
	 * internal helper. */
	return cli_rpc_pipe_open_ntlmssp_internal(cli,
						interface,
						PIPE_AUTH_TYPE_NTLMSSP,
						auth_level,
						domain,
						username,
						password,
						presult);
}
3680
3681 /****************************************************************************
3682  External interface.
3683  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3684  ****************************************************************************/
3685
NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
					  const struct ndr_syntax_id *interface,
					  enum pipe_auth_level auth_level,
					  const char *domain,
					  const char *username,
					  const char *password,
					  struct rpc_pipe_client **presult)
{
	/* Thin wrapper: NTLMSSP wrapped in SPNEGO; all work happens in
	 * the shared internal helper. */
	return cli_rpc_pipe_open_ntlmssp_internal(cli,
						interface,
						PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
						auth_level,
						domain,
						username,
						password,
						presult);
}
3703
/****************************************************************************
  Get the schannel session key out of an already opened netlogon pipe.
 ****************************************************************************/
3707 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3708                                                 struct cli_state *cli,
3709                                                 const char *domain,
3710                                                 uint32 *pneg_flags)
3711 {
3712         uint32 sec_chan_type = 0;
3713         unsigned char machine_pwd[16];
3714         const char *machine_account;
3715         NTSTATUS status;
3716
3717         /* Get the machine account credentials from secrets.tdb. */
3718         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3719                                &sec_chan_type))
3720         {
3721                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3722                         "trust account password for domain '%s'\n",
3723                         domain));
3724                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3725         }
3726
3727         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3728                                         cli->desthost, /* server name */
3729                                         domain,        /* domain */
3730                                         global_myname(), /* client name */
3731                                         machine_account, /* machine account name */
3732                                         machine_pwd,
3733                                         sec_chan_type,
3734                                         pneg_flags);
3735
3736         if (!NT_STATUS_IS_OK(status)) {
3737                 DEBUG(3, ("get_schannel_session_key_common: "
3738                           "rpccli_netlogon_setup_creds failed with result %s "
3739                           "to server %s, domain %s, machine account %s.\n",
3740                           nt_errstr(status), cli->desthost, domain,
3741                           machine_account ));
3742                 return status;
3743         }
3744
3745         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3746                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3747                         cli->desthost));
3748                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3749         }
3750
3751         return NT_STATUS_OK;;
3752 }
3753
3754 /****************************************************************************
3755  Open a netlogon pipe and get the schannel session key.
3756  Now exposed to external callers.
3757  ****************************************************************************/
3758
3759
3760 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3761                                   const char *domain,
3762                                   uint32 *pneg_flags,
3763                                   struct rpc_pipe_client **presult)
3764 {
3765         struct rpc_pipe_client *netlogon_pipe = NULL;
3766         NTSTATUS status;
3767
3768         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3769                                           &netlogon_pipe);
3770         if (!NT_STATUS_IS_OK(status)) {
3771                 return status;
3772         }
3773
3774         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3775                                                  pneg_flags);
3776         if (!NT_STATUS_IS_OK(status)) {
3777                 TALLOC_FREE(netlogon_pipe);
3778                 return status;
3779         }
3780
3781         *presult = netlogon_pipe;
3782         return NT_STATUS_OK;
3783 }
3784
3785 /****************************************************************************
3786  External interface.
3787  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3788  using session_key. sign and seal.
3789  ****************************************************************************/
3790
3791 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3792                                              const struct ndr_syntax_id *interface,
3793                                              enum pipe_auth_level auth_level,
3794                                              const char *domain,
3795                                              const struct dcinfo *pdc,
3796                                              struct rpc_pipe_client **presult)
3797 {
3798         struct rpc_pipe_client *result;
3799         struct cli_pipe_auth_data *auth;
3800         NTSTATUS status;
3801
3802         status = cli_rpc_pipe_open(cli, interface, &result);
3803         if (!NT_STATUS_IS_OK(status)) {
3804                 return status;
3805         }
3806
3807         status = rpccli_schannel_bind_data(result, domain, auth_level,
3808                                            pdc->sess_key, &auth);
3809         if (!NT_STATUS_IS_OK(status)) {
3810                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3811                           nt_errstr(status)));
3812                 TALLOC_FREE(result);
3813                 return status;
3814         }
3815
3816         status = rpc_pipe_bind(result, auth);
3817         if (!NT_STATUS_IS_OK(status)) {
3818                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3819                           "cli_rpc_pipe_bind failed with error %s\n",
3820                           nt_errstr(status) ));
3821                 TALLOC_FREE(result);
3822                 return status;
3823         }
3824
3825         /*
3826          * The credentials on a new netlogon pipe are the ones we are passed
3827          * in - copy them over.
3828          */
3829         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3830         if (result->dc == NULL) {
3831                 DEBUG(0, ("talloc failed\n"));
3832                 TALLOC_FREE(result);
3833                 return NT_STATUS_NO_MEMORY;
3834         }
3835
3836         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3837                   "for domain %s and bound using schannel.\n",
3838                   cli_get_pipe_name_from_iface(debug_ctx(), interface),
3839                   cli->desthost, domain ));
3840
3841         *presult = result;
3842         return NT_STATUS_OK;
3843 }
3844
3845 /****************************************************************************
3846  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3847  Fetch the session key ourselves using a temporary netlogon pipe. This
3848  version uses an ntlmssp auth bound netlogon pipe to get the key.
3849  ****************************************************************************/
3850
3851 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3852                                                       const char *domain,
3853                                                       const char *username,
3854                                                       const char *password,
3855                                                       uint32 *pneg_flags,
3856                                                       struct rpc_pipe_client **presult)
3857 {
3858         struct rpc_pipe_client *netlogon_pipe = NULL;
3859         NTSTATUS status;
3860
3861         status = cli_rpc_pipe_open_spnego_ntlmssp(
3862                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3863                 domain, username, password, &netlogon_pipe);
3864         if (!NT_STATUS_IS_OK(status)) {
3865                 return status;
3866         }
3867
3868         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3869                                                  pneg_flags);
3870         if (!NT_STATUS_IS_OK(status)) {
3871                 TALLOC_FREE(netlogon_pipe);
3872                 return status;
3873         }
3874
3875         *presult = netlogon_pipe;
3876         return NT_STATUS_OK;
3877 }
3878
3879 /****************************************************************************
3880  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3881  Fetch the session key ourselves using a temporary netlogon pipe. This version
3882  uses an ntlmssp bind to get the session key.
3883  ****************************************************************************/
3884
3885 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3886                                                  const struct ndr_syntax_id *interface,
3887                                                  enum pipe_auth_level auth_level,
3888                                                  const char *domain,
3889                                                  const char *username,
3890                                                  const char *password,
3891                                                  struct rpc_pipe_client **presult)
3892 {
3893         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3894         struct rpc_pipe_client *netlogon_pipe = NULL;
3895         struct rpc_pipe_client *result = NULL;
3896         NTSTATUS status;
3897
3898         status = get_schannel_session_key_auth_ntlmssp(
3899                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
3900         if (!NT_STATUS_IS_OK(status)) {
3901                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
3902                         "key from server %s for domain %s.\n",
3903                         cli->desthost, domain ));
3904                 return status;
3905         }
3906
3907         status = cli_rpc_pipe_open_schannel_with_key(
3908                 cli, interface, auth_level, domain, netlogon_pipe->dc,
3909                 &result);
3910
3911         /* Now we've bound using the session key we can close the netlog pipe. */
3912         TALLOC_FREE(netlogon_pipe);
3913
3914         if (NT_STATUS_IS_OK(status)) {
3915                 *presult = result;
3916         }
3917         return status;
3918 }
3919
3920 /****************************************************************************
3921  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3922  Fetch the session key ourselves using a temporary netlogon pipe.
3923  ****************************************************************************/
3924
3925 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
3926                                     const struct ndr_syntax_id *interface,
3927                                     enum pipe_auth_level auth_level,
3928                                     const char *domain,
3929                                     struct rpc_pipe_client **presult)
3930 {
3931         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3932         struct rpc_pipe_client *netlogon_pipe = NULL;
3933         struct rpc_pipe_client *result = NULL;
3934         NTSTATUS status;
3935
3936         status = get_schannel_session_key(cli, domain, &neg_flags,
3937                                           &netlogon_pipe);
3938         if (!NT_STATUS_IS_OK(status)) {
3939                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
3940                         "key from server %s for domain %s.\n",
3941                         cli->desthost, domain ));
3942                 return status;
3943         }
3944
3945         status = cli_rpc_pipe_open_schannel_with_key(
3946                 cli, interface, auth_level, domain, netlogon_pipe->dc,
3947                 &result);
3948
3949         /* Now we've bound using the session key we can close the netlog pipe. */
3950         TALLOC_FREE(netlogon_pipe);
3951
3952         if (NT_STATUS_IS_OK(status)) {
3953                 *presult = result;
3954         }
3955
3956         return NT_STATUS_OK;
3957 }
3958
3959 /****************************************************************************
3960  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
3961  The idea is this can be called with service_princ, username and password all
3962  NULL so long as the caller has a TGT.
3963  ****************************************************************************/
3964
3965 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
3966                                 const struct ndr_syntax_id *interface,
3967                                 enum pipe_auth_level auth_level,
3968                                 const char *service_princ,
3969                                 const char *username,
3970                                 const char *password,
3971                                 struct rpc_pipe_client **presult)
3972 {
3973 #ifdef HAVE_KRB5
3974         struct rpc_pipe_client *result;
3975         struct cli_pipe_auth_data *auth;
3976         NTSTATUS status;
3977
3978         status = cli_rpc_pipe_open(cli, interface, &result);
3979         if (!NT_STATUS_IS_OK(status)) {
3980                 return status;
3981         }
3982
3983         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
3984                                            username, password, &auth);
3985         if (!NT_STATUS_IS_OK(status)) {
3986                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
3987                           nt_errstr(status)));
3988                 TALLOC_FREE(result);
3989                 return status;
3990         }
3991
3992         status = rpc_pipe_bind(result, auth);
3993         if (!NT_STATUS_IS_OK(status)) {
3994                 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
3995                           "with error %s\n", nt_errstr(status)));
3996                 TALLOC_FREE(result);
3997                 return status;
3998         }
3999
4000         *presult = result;
4001         return NT_STATUS_OK;
4002 #else
4003         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4004         return NT_STATUS_NOT_IMPLEMENTED;
4005 #endif
4006 }
4007
4008 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4009                              struct rpc_pipe_client *cli,
4010                              DATA_BLOB *session_key)
4011 {
4012         if (!session_key || !cli) {
4013                 return NT_STATUS_INVALID_PARAMETER;
4014         }
4015
4016         if (!cli->auth) {
4017                 return NT_STATUS_INVALID_PARAMETER;
4018         }
4019
4020         switch (cli->auth->auth_type) {
4021                 case PIPE_AUTH_TYPE_SCHANNEL:
4022                         *session_key = data_blob_talloc(mem_ctx,
4023                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4024                         break;
4025                 case PIPE_AUTH_TYPE_NTLMSSP:
4026                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4027                         *session_key = data_blob_talloc(mem_ctx,
4028                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4029                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4030                         break;
4031                 case PIPE_AUTH_TYPE_KRB5:
4032                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4033                         *session_key = data_blob_talloc(mem_ctx,
4034                                 cli->auth->a_u.kerberos_auth->session_key.data,
4035                                 cli->auth->a_u.kerberos_auth->session_key.length);
4036                         break;
4037                 case PIPE_AUTH_TYPE_NONE:
4038                         *session_key = data_blob_talloc(mem_ctx,
4039                                 cli->auth->user_session_key.data,
4040                                 cli->auth->user_session_key.length);
4041                         break;
4042                 default:
4043                         return NT_STATUS_NO_USER_SESSION_KEY;
4044         }
4045
4046         return NT_STATUS_OK;
4047 }
4048
4049 /**
4050  * Create a new RPC client context which uses a local dispatch function.
4051  */
4052 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx, const struct ndr_syntax_id *abstract_syntax, 
4053                                 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli, TALLOC_CTX *mem_ctx, const struct ndr_interface_table *table, uint32_t opnum, void *r),
4054                                 struct auth_serversupplied_info *serversupplied_info,
4055                                 struct rpc_pipe_client **presult)
4056 {
4057         struct rpc_pipe_client *result;
4058
4059         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
4060         if (result == NULL) {
4061                 return NT_STATUS_NO_MEMORY;
4062         }
4063
4064         result->abstract_syntax = *abstract_syntax;
4065         result->transfer_syntax = ndr_transfer_syntax;
4066         result->dispatch = dispatch;
4067
4068         result->pipes_struct = TALLOC_ZERO_P(mem_ctx, pipes_struct);
4069         if (result->pipes_struct == NULL) {
4070                 TALLOC_FREE(result);
4071                 return NT_STATUS_NO_MEMORY;
4072         }
4073         result->pipes_struct->mem_ctx = mem_ctx;
4074         result->pipes_struct->server_info = serversupplied_info;
4075         result->pipes_struct->pipe_bound = true;
4076
4077         result->max_xmit_frag = -1;
4078         result->max_recv_frag = -1;
4079
4080         *presult = result;
4081         return NT_STATUS_OK;
4082 }
4083
4084