s3-kerberos: only use krb5 headers where required.
[samba.git] / source3 / rpc_client / cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22 #include "smb_krb5.h"
23
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_CLI
26
27 /*******************************************************************
28 interface/version dce/rpc pipe identification
29 ********************************************************************/
30
31 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
32 #define PIPE_SAMR     "\\PIPE\\samr"
33 #define PIPE_WINREG   "\\PIPE\\winreg"
34 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
35 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
36 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
37 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
38 #define PIPE_LSASS    "\\PIPE\\lsass"
39 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
40 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
41 #define PIPE_NETDFS   "\\PIPE\\netdfs"
42 #define PIPE_ECHO     "\\PIPE\\rpcecho"
43 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
44 #define PIPE_EPM      "\\PIPE\\epmapper"
45 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
46 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
47 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
48 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
49
50 /*
51  * IMPORTANT!!  If you update this structure, make sure to
52  * update the index #defines in smb.h.
53  */
54
55 static const struct pipe_id_info {
56         /* the names appear not to matter: the syntaxes _do_ matter */
57
58         const char *client_pipe;
59         const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
60 } pipe_names [] =
61 {
62         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
63         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
64         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
65         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
66         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
67         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
68         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
69         { PIPE_SPOOLSS,         &ndr_table_spoolss.syntax_id },
70         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
71         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
72         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
73         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
74         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
75         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
76         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
77         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
78         { NULL, NULL }
79 };
80
81 /****************************************************************************
82  Return the pipe name from the interface.
83  ****************************************************************************/
84
85 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
86 {
87         char *guid_str;
88         const char *result;
89         int i;
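        /*
         * Look the interface up in the static pipe_names[] table above.
         * The stored client_pipe names carry the "\PIPE" prefix, so
         * returning &client_pipe[5] hands back the short form, e.g.
         * "\PIPE\srvsvc" becomes "\srvsvc".
         */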
90         for (i = 0; pipe_names[i].client_pipe; i++) {
91                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
92                                         interface)) {
93                         return &pipe_names[i].client_pipe[5];
94                 }
95         }
96
97         /*
98          * Here we should ask \\epmapper, but for now our code is only
99          * interested in the known pipes mentioned in pipe_names[]
100          */
101
102         guid_str = GUID_string(talloc_tos(), &interface->uuid);
103         if (guid_str == NULL) {
104                 return NULL;
105         }
106         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
107                                  (int)interface->if_version);
108         TALLOC_FREE(guid_str);
109
110         if (result == NULL) {
111                 return "PIPE";
112         }
113         return result;
114 }
115
116 /********************************************************************
117  Map internal value to wire value.
118  ********************************************************************/
119
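/*
 * A note on the mapping, assuming the usual MS-RPCE auth_type values
 * behind the RPC_*_AUTH_TYPE constants: 0x00 anonymous, 0x09 SPNEGO,
 * 0x0a NTLMSSP, 0x10 Kerberos and 0x44 netlogon schannel. These are the
 * values placed in the auth_type field of the RPC_HDR_AUTH on the wire;
 * -1 signals an internal auth type we cannot map.
 */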
120 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
121 {
122         switch (auth_type) {
123
124         case PIPE_AUTH_TYPE_NONE:
125                 return RPC_ANONYMOUS_AUTH_TYPE;
126
127         case PIPE_AUTH_TYPE_NTLMSSP:
128                 return RPC_NTLMSSP_AUTH_TYPE;
129
130         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
131         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
132                 return RPC_SPNEGO_AUTH_TYPE;
133
134         case PIPE_AUTH_TYPE_SCHANNEL:
135                 return RPC_SCHANNEL_AUTH_TYPE;
136
137         case PIPE_AUTH_TYPE_KRB5:
138                 return RPC_KRB5_AUTH_TYPE;
139
140         default:
141                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
142                         "auth type %u\n",
143                         (unsigned int)auth_type ));
144                 break;
145         }
146         return -1;
147 }
148
149 /********************************************************************
150  Pipe description for a DEBUG
151  ********************************************************************/
152 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
153                                    struct rpc_pipe_client *cli)
154 {
155         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
156         if (result == NULL) {
157                 return "pipe";
158         }
159         return result;
160 }
161
162 /********************************************************************
163  Rpc pipe call id.
164  ********************************************************************/
165
166 static uint32 get_rpc_call_id(void)
167 {
168         static uint32 call_id = 0;
169         return ++call_id;
170 }
171
172 /*
173  * Realloc pdu to have at least "size" bytes
174  */
175
176 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
177 {
178         size_t extra_size;
179
180         if (prs_data_size(pdu) >= size) {
181                 return true;
182         }
183
184         extra_size = size - prs_data_size(pdu);
185
186         if (!prs_force_grow(pdu, extra_size)) {
187                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
188                           "%d bytes.\n", (int)extra_size));
189                 return false;
190         }
191
192         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
193                   (int)extra_size, prs_data_size(pdu)));
194         return true;
195 }
196
197
198 /*******************************************************************
199  Use SMBreadX to get rest of one fragment's worth of rpc data.
200  Reads the whole size or gives an error message
201  ********************************************************************/
202
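/*
 * rpc_read_send/rpc_read_done/rpc_read_recv implement an async
 * "read exactly size bytes" loop: transport->read_send may complete
 * with a short read, so rpc_read_done keeps re-issuing reads at
 * data + num_read until num_read == size before finishing the request.
 */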
203 struct rpc_read_state {
204         struct event_context *ev;
205         struct rpc_cli_transport *transport;
206         uint8_t *data;
207         size_t size;
208         size_t num_read;
209 };
210
211 static void rpc_read_done(struct async_req *subreq);
212
213 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
214                                        struct event_context *ev,
215                                        struct rpc_cli_transport *transport,
216                                        uint8_t *data, size_t size)
217 {
218         struct async_req *result, *subreq;
219         struct rpc_read_state *state;
220
221         if (!async_req_setup(mem_ctx, &result, &state,
222                              struct rpc_read_state)) {
223                 return NULL;
224         }
225         state->ev = ev;
226         state->transport = transport;
227         state->data = data;
228         state->size = size;
229         state->num_read = 0;
230
231         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
232
233         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
234                                       transport->priv);
235         if (subreq == NULL) {
236                 goto fail;
237         }
238         subreq->async.fn = rpc_read_done;
239         subreq->async.priv = result;
240         return result;
241
242  fail:
243         TALLOC_FREE(result);
244         return NULL;
245 }
246
247 static void rpc_read_done(struct async_req *subreq)
248 {
249         struct async_req *req = talloc_get_type_abort(
250                 subreq->async.priv, struct async_req);
251         struct rpc_read_state *state = talloc_get_type_abort(
252                 req->private_data, struct rpc_read_state);
253         NTSTATUS status;
254         ssize_t received;
255
256         status = state->transport->read_recv(subreq, &received);
257         TALLOC_FREE(subreq);
258         if (!NT_STATUS_IS_OK(status)) {
259                 async_req_nterror(req, status);
260                 return;
261         }
262
263         state->num_read += received;
264         if (state->num_read == state->size) {
265                 async_req_done(req);
266                 return;
267         }
268
269         subreq = state->transport->read_send(state, state->ev,
270                                              state->data + state->num_read,
271                                              state->size - state->num_read,
272                                              state->transport->priv);
273         if (async_req_nomem(subreq, req)) {
274                 return;
275         }
276         subreq->async.fn = rpc_read_done;
277         subreq->async.priv = req;
278 }
279
280 static NTSTATUS rpc_read_recv(struct async_req *req)
281 {
282         return async_req_simple_recv_ntstatus(req);
283 }
284
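/*
 * The write path below mirrors the read loop above:
 * transport->write_send is re-issued for the remaining bytes until
 * num_written == size.
 */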
285 struct rpc_write_state {
286         struct event_context *ev;
287         struct rpc_cli_transport *transport;
288         const uint8_t *data;
289         size_t size;
290         size_t num_written;
291 };
292
293 static void rpc_write_done(struct async_req *subreq);
294
295 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
296                                         struct event_context *ev,
297                                         struct rpc_cli_transport *transport,
298                                         const uint8_t *data, size_t size)
299 {
300         struct async_req *result, *subreq;
301         struct rpc_write_state *state;
302
303         if (!async_req_setup(mem_ctx, &result, &state,
304                              struct rpc_write_state)) {
305                 return NULL;
306         }
307         state->ev = ev;
308         state->transport = transport;
309         state->data = data;
310         state->size = size;
311         state->num_written = 0;
312
313         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
314
315         subreq = transport->write_send(state, ev, data, size, transport->priv);
316         if (subreq == NULL) {
317                 goto fail;
318         }
319         subreq->async.fn = rpc_write_done;
320         subreq->async.priv = result;
321         return result;
322  fail:
323         TALLOC_FREE(result);
324         return NULL;
325 }
326
327 static void rpc_write_done(struct async_req *subreq)
328 {
329         struct async_req *req = talloc_get_type_abort(
330                 subreq->async.priv, struct async_req);
331         struct rpc_write_state *state = talloc_get_type_abort(
332                 req->private_data, struct rpc_write_state);
333         NTSTATUS status;
334         ssize_t written;
335
336         status = state->transport->write_recv(subreq, &written);
337         TALLOC_FREE(subreq);
338         if (!NT_STATUS_IS_OK(status)) {
339                 async_req_nterror(req, status);
340                 return;
341         }
342
343         state->num_written += written;
344
345         if (state->num_written == state->size) {
346                 async_req_done(req);
347                 return;
348         }
349
350         subreq = state->transport->write_send(state, state->ev,
351                                               state->data + state->num_written,
352                                               state->size - state->num_written,
353                                               state->transport->priv);
354         if (async_req_nomem(subreq, req)) {
355                 return;
356         }
357         subreq->async.fn = rpc_write_done;
358         subreq->async.priv = req;
359 }
360
361 static NTSTATUS rpc_write_recv(struct async_req *req)
362 {
363         return async_req_simple_recv_ntstatus(req);
364 }
365
366
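/*
 * Unmarshall the RPC_HEADER (the fixed RPC_HEADER_LEN bytes at the start
 * of every PDU) and check that the advertised frag_len does not exceed
 * the fragment size we are prepared to receive (cli->max_recv_frag).
 */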
367 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
368                                  struct rpc_hdr_info *prhdr,
369                                  prs_struct *pdu)
370 {
371         /*
372          * This next call sets the endian bit correctly in current_pdu. We
373          * will propagate this to rbuf later.
374          */
375
376         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
377                 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
378                 return NT_STATUS_BUFFER_TOO_SMALL;
379         }
380
381         if (prhdr->frag_len > cli->max_recv_frag) {
382                 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
383                           " we only allow %d\n", (int)prhdr->frag_len,
384                           (int)cli->max_recv_frag));
385                 return NT_STATUS_BUFFER_TOO_SMALL;
386         }
387
388         return NT_STATUS_OK;
389 }
390
391 /****************************************************************************
392  Try to get a PDU's worth of data from current_pdu. If there is not enough, read more
393  from the wire.
394  ****************************************************************************/
395
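/*
 * get_complete_frag works in two steps: first make sure at least
 * RPC_HEADER_LEN bytes are in pdu so the header (and therefore
 * frag_len) can be parsed, then keep reading until the full frag_len
 * bytes of this fragment have arrived.
 */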
396 struct get_complete_frag_state {
397         struct event_context *ev;
398         struct rpc_pipe_client *cli;
399         struct rpc_hdr_info *prhdr;
400         prs_struct *pdu;
401 };
402
403 static void get_complete_frag_got_header(struct async_req *subreq);
404 static void get_complete_frag_got_rest(struct async_req *subreq);
405
406 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
407                                                struct event_context *ev,
408                                                struct rpc_pipe_client *cli,
409                                                struct rpc_hdr_info *prhdr,
410                                                prs_struct *pdu)
411 {
412         struct async_req *result, *subreq;
413         struct get_complete_frag_state *state;
414         uint32_t pdu_len;
415         NTSTATUS status;
416
417         if (!async_req_setup(mem_ctx, &result, &state,
418                              struct get_complete_frag_state)) {
419                 return NULL;
420         }
421         state->ev = ev;
422         state->cli = cli;
423         state->prhdr = prhdr;
424         state->pdu = pdu;
425
426         pdu_len = prs_data_size(pdu);
427         if (pdu_len < RPC_HEADER_LEN) {
428                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
429                         status = NT_STATUS_NO_MEMORY;
430                         goto post_status;
431                 }
432                 subreq = rpc_read_send(
433                         state, state->ev,
434                         state->cli->transport,
435                         (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
436                         RPC_HEADER_LEN - pdu_len);
437                 if (subreq == NULL) {
438                         status = NT_STATUS_NO_MEMORY;
439                         goto post_status;
440                 }
441                 subreq->async.fn = get_complete_frag_got_header;
442                 subreq->async.priv = result;
443                 return result;
444         }
445
446         status = parse_rpc_header(cli, prhdr, pdu);
447         if (!NT_STATUS_IS_OK(status)) {
448                 goto post_status;
449         }
450
451         /*
452          * Ensure we have frag_len bytes of data.
453          */
454         if (pdu_len < prhdr->frag_len) {
455                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
456                         status = NT_STATUS_NO_MEMORY;
457                         goto post_status;
458                 }
459                 subreq = rpc_read_send(state, state->ev,
460                                        state->cli->transport,
461                                        (uint8_t *)(prs_data_p(pdu) + pdu_len),
462                                        prhdr->frag_len - pdu_len);
463                 if (subreq == NULL) {
464                         status = NT_STATUS_NO_MEMORY;
465                         goto post_status;
466                 }
467                 subreq->async.fn = get_complete_frag_got_rest;
468                 subreq->async.priv = result;
469                 return result;
470         }
471
472         status = NT_STATUS_OK;
473  post_status:
474         if (async_post_ntstatus(result, ev, status)) {
475                 return result;
476         }
477         TALLOC_FREE(result);
478         return NULL;
479 }
480
481 static void get_complete_frag_got_header(struct async_req *subreq)
482 {
483         struct async_req *req = talloc_get_type_abort(
484                 subreq->async.priv, struct async_req);
485         struct get_complete_frag_state *state = talloc_get_type_abort(
486                 req->private_data, struct get_complete_frag_state);
487         NTSTATUS status;
488
489         status = rpc_read_recv(subreq);
490         TALLOC_FREE(subreq);
491         if (!NT_STATUS_IS_OK(status)) {
492                 async_req_nterror(req, status);
493                 return;
494         }
495
496         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
497         if (!NT_STATUS_IS_OK(status)) {
498                 async_req_nterror(req, status);
499                 return;
500         }
501
502         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
503                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
504                 return;
505         }
506
507         /*
508          * We're here in this piece of code because we've read exactly
509          * RPC_HEADER_LEN bytes into state->pdu.
510          */
511
512         subreq = rpc_read_send(
513                 state, state->ev, state->cli->transport,
514                 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
515                 state->prhdr->frag_len - RPC_HEADER_LEN);
516         if (async_req_nomem(subreq, req)) {
517                 return;
518         }
519         subreq->async.fn = get_complete_frag_got_rest;
520         subreq->async.priv = req;
521 }
522
523 static void get_complete_frag_got_rest(struct async_req *subreq)
524 {
525         struct async_req *req = talloc_get_type_abort(
526                 subreq->async.priv, struct async_req);
527         NTSTATUS status;
528
529         status = rpc_read_recv(subreq);
530         TALLOC_FREE(subreq);
531         if (!NT_STATUS_IS_OK(status)) {
532                 async_req_nterror(req, status);
533                 return;
534         }
535         async_req_done(req);
536 }
537
538 static NTSTATUS get_complete_frag_recv(struct async_req *req)
539 {
540         return async_req_simple_recv_ntstatus(req);
541 }
542
543 /****************************************************************************
544  NTLMSSP specific sign/seal.
545  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
546  In fact I should probably abstract these into identical pieces of code... JRA.
547  ****************************************************************************/
548
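/*
 * For PIPE_AUTH_LEVEL_INTEGRITY only the NTLMSSP signature trailer is
 * checked (ntlmssp_check_packet); for PIPE_AUTH_LEVEL_PRIVACY the
 * payload is also decrypted in place (ntlmssp_unseal_packet). The auth
 * padding length is returned through p_ss_padding_len so the caller can
 * strip it from the NDR data.
 */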
549 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
550                                 prs_struct *current_pdu,
551                                 uint8 *p_ss_padding_len)
552 {
553         RPC_HDR_AUTH auth_info;
554         uint32 save_offset = prs_offset(current_pdu);
555         uint32 auth_len = prhdr->auth_len;
556         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
557         unsigned char *data = NULL;
558         size_t data_len;
559         unsigned char *full_packet_data = NULL;
560         size_t full_packet_data_len;
561         DATA_BLOB auth_blob;
562         NTSTATUS status;
563
564         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
565             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
566                 return NT_STATUS_OK;
567         }
568
569         if (!ntlmssp_state) {
570                 return NT_STATUS_INVALID_PARAMETER;
571         }
572
573         /* Ensure there's enough data for an authenticated response. */
574         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
575                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
576                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
577                         (unsigned int)auth_len ));
578                 return NT_STATUS_BUFFER_TOO_SMALL;
579         }
580
581         /*
582          * We need both the full packet data + length (minus the auth trailer) and
583          * the data + length that follows the RPC header.
584          * The full packet (minus auth len) has to be passed to the NTLMSSP sign and
585          * seal check functions because NTLMv2 signs the rpc headers as well.
586          */
587
588         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
589         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
590
591         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
592         full_packet_data_len = prhdr->frag_len - auth_len;
593
594         /* Pull the auth header and the following data into a blob. */
595         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
596                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
597                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
598                 return NT_STATUS_BUFFER_TOO_SMALL;
599         }
600
601         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
602                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
603                 return NT_STATUS_BUFFER_TOO_SMALL;
604         }
605
606         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
607         auth_blob.length = auth_len;
608
609         switch (cli->auth->auth_level) {
610                 case PIPE_AUTH_LEVEL_PRIVACY:
611                         /* Data is encrypted. */
612                         status = ntlmssp_unseal_packet(ntlmssp_state,
613                                                         data, data_len,
614                                                         full_packet_data,
615                                                         full_packet_data_len,
616                                                         &auth_blob);
617                         if (!NT_STATUS_IS_OK(status)) {
618                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
619                                         "packet from %s. Error was %s.\n",
620                                         rpccli_pipe_txt(debug_ctx(), cli),
621                                         nt_errstr(status) ));
622                                 return status;
623                         }
624                         break;
625                 case PIPE_AUTH_LEVEL_INTEGRITY:
626                         /* Data is signed. */
627                         status = ntlmssp_check_packet(ntlmssp_state,
628                                                         data, data_len,
629                                                         full_packet_data,
630                                                         full_packet_data_len,
631                                                         &auth_blob);
632                         if (!NT_STATUS_IS_OK(status)) {
633                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
634                                         "packet from %s. Error was %s.\n",
635                                         rpccli_pipe_txt(debug_ctx(), cli),
636                                         nt_errstr(status) ));
637                                 return status;
638                         }
639                         break;
640                 default:
641                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
642                                   "auth level %d\n", cli->auth->auth_level));
643                         return NT_STATUS_INVALID_INFO_CLASS;
644         }
645
646         /*
647          * Return the current pointer to the data offset.
648          */
649
650         if(!prs_set_offset(current_pdu, save_offset)) {
651                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
652                         (unsigned int)save_offset ));
653                 return NT_STATUS_BUFFER_TOO_SMALL;
654         }
655
656         /*
657          * Remember the padding length. We must remove it from the real data
658          * stream once the sign/seal is done.
659          */
660
661         *p_ss_padding_len = auth_info.auth_pad_len;
662
663         return NT_STATUS_OK;
664 }
665
666 /****************************************************************************
667  schannel specific sign/seal.
668  ****************************************************************************/
669
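/*
 * Same shape as the NTLMSSP check above, but the verifier is an
 * RPC_AUTH_SCHANNEL_CHK trailer handled by schannel_decode(). The
 * sequence number is bumped for every PDU, sent or received.
 */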
670 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
671                                 prs_struct *current_pdu,
672                                 uint8 *p_ss_padding_len)
673 {
674         RPC_HDR_AUTH auth_info;
675         RPC_AUTH_SCHANNEL_CHK schannel_chk;
676         uint32 auth_len = prhdr->auth_len;
677         uint32 save_offset = prs_offset(current_pdu);
678         struct schannel_auth_struct *schannel_auth =
679                 cli->auth->a_u.schannel_auth;
680         uint32 data_len;
681
682         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
683             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
684                 return NT_STATUS_OK;
685         }
686
687         if (auth_len < RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
688                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
689                 return NT_STATUS_INVALID_PARAMETER;
690         }
691
692         if (!schannel_auth) {
693                 return NT_STATUS_INVALID_PARAMETER;
694         }
695
696         /* Ensure there's enough data for an authenticated response. */
697         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
698                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
699                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
700                         (unsigned int)auth_len ));
701                 return NT_STATUS_INVALID_PARAMETER;
702         }
703
704         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
705
706         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
707                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
708                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
709                 return NT_STATUS_BUFFER_TOO_SMALL;
710         }
711
712         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
713                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
714                 return NT_STATUS_BUFFER_TOO_SMALL;
715         }
716
717         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
718                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
719                         auth_info.auth_type));
720                 return NT_STATUS_BUFFER_TOO_SMALL;
721         }
722
723         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
724                                 &schannel_chk, current_pdu, 0)) {
725                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
726                 return NT_STATUS_BUFFER_TOO_SMALL;
727         }
728
729         if (!schannel_decode(schannel_auth,
730                         cli->auth->auth_level,
731                         SENDER_IS_ACCEPTOR,
732                         &schannel_chk,
733                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
734                         data_len)) {
735                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
736                                 "Connection to %s.\n",
737                                 rpccli_pipe_txt(debug_ctx(), cli)));
738                 return NT_STATUS_INVALID_PARAMETER;
739         }
740
741         /* The sequence number gets incremented on both send and receive. */
742         schannel_auth->seq_num++;
743
744         /*
745          * Return the current pointer to the data offset.
746          */
747
748         if(!prs_set_offset(current_pdu, save_offset)) {
749                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
750                         (unsigned int)save_offset ));
751                 return NT_STATUS_BUFFER_TOO_SMALL;
752         }
753
754         /*
755          * Remember the padding length. We must remove it from the real data
756          * stream once the sign/seal is done.
757          */
758
759         *p_ss_padding_len = auth_info.auth_pad_len;
760
761         return NT_STATUS_OK;
762 }
763
764 /****************************************************************************
765  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
766  ****************************************************************************/
767
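/*
 * After the integer-wrap checks on auth_len this dispatches on
 * cli->auth->auth_type; KRB5 and SPNEGO_KRB5 responses are not handled
 * here and fall through to the error case.
 */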
768 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
769                                 prs_struct *current_pdu,
770                                 uint8 *p_ss_padding_len)
771 {
772         NTSTATUS ret = NT_STATUS_OK;
773
774         /* Paranoia checks for auth_len. */
775         if (prhdr->auth_len) {
776                 if (prhdr->auth_len > prhdr->frag_len) {
777                         return NT_STATUS_INVALID_PARAMETER;
778                 }
779
780                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
781                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
782                         /* Integer wrap attempt. */
783                         return NT_STATUS_INVALID_PARAMETER;
784                 }
785         }
786
787         /*
788          * Now we have a complete RPC request PDU fragment, try and verify any auth data.
789          */
790
791         switch(cli->auth->auth_type) {
792                 case PIPE_AUTH_TYPE_NONE:
793                         if (prhdr->auth_len) {
794                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
795                                           "Connection to %s - got non-zero "
796                                           "auth len %u.\n",
797                                         rpccli_pipe_txt(debug_ctx(), cli),
798                                         (unsigned int)prhdr->auth_len ));
799                                 return NT_STATUS_INVALID_PARAMETER;
800                         }
801                         break;
802
803                 case PIPE_AUTH_TYPE_NTLMSSP:
804                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
805                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
806                         if (!NT_STATUS_IS_OK(ret)) {
807                                 return ret;
808                         }
809                         break;
810
811                 case PIPE_AUTH_TYPE_SCHANNEL:
812                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
813                         if (!NT_STATUS_IS_OK(ret)) {
814                                 return ret;
815                         }
816                         break;
817
818                 case PIPE_AUTH_TYPE_KRB5:
819                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
820                 default:
821                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
822                                   "to %s - unknown internal auth type %u.\n",
823                                   rpccli_pipe_txt(debug_ctx(), cli),
824                                   cli->auth->auth_type ));
825                         return NT_STATUS_INVALID_INFO_CLASS;
826         }
827
828         return NT_STATUS_OK;
829 }
830
831 /****************************************************************************
832  Do basic authentication checks on an incoming pdu.
833  ****************************************************************************/
834
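/*
 * Besides the auth checks this also interprets the packet type:
 * RPC_BINDACK/RPC_ALTCONTRESP are passed through, RPC_RESPONSE has its
 * sign/seal verified and ppdata/pdata_len adjusted to the NDR payload
 * (minus any sign/seal padding and the auth trailer), while
 * RPC_BINDNACK and RPC_FAULT are mapped to NTSTATUS errors.
 */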
835 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
836                         prs_struct *current_pdu,
837                         uint8 expected_pkt_type,
838                         char **ppdata,
839                         uint32 *pdata_len,
840                         prs_struct *return_data)
841 {
842
843         NTSTATUS ret = NT_STATUS_OK;
844         uint32 current_pdu_len = prs_data_size(current_pdu);
845
846         if (current_pdu_len != prhdr->frag_len) {
847                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
848                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
849                 return NT_STATUS_INVALID_PARAMETER;
850         }
851
852         /*
853          * Point the return values at the real data including the RPC
854          * header. Just in case the caller wants it.
855          */
856         *ppdata = prs_data_p(current_pdu);
857         *pdata_len = current_pdu_len;
858
859         /* Ensure we have the correct type. */
860         switch (prhdr->pkt_type) {
861                 case RPC_ALTCONTRESP:
862                 case RPC_BINDACK:
863
864                         /* Alter context and bind ack share the same packet definitions. */
865                         break;
866
867
868                 case RPC_RESPONSE:
869                 {
870                         RPC_HDR_RESP rhdr_resp;
871                         uint8 ss_padding_len = 0;
872
873                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
874                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
875                                 return NT_STATUS_BUFFER_TOO_SMALL;
876                         }
877
878                         /* Here's where we deal with incoming sign/seal. */
879                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
880                                         current_pdu, &ss_padding_len);
881                         if (!NT_STATUS_IS_OK(ret)) {
882                                 return ret;
883                         }
884
885                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
886                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
887
888                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
889                                 return NT_STATUS_BUFFER_TOO_SMALL;
890                         }
891
892                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
893
894                         /* Remember to remove the auth footer. */
895                         if (prhdr->auth_len) {
896                                 /* We've already done integer wrap tests on auth_len in
897                                         cli_pipe_validate_rpc_response(). */
898                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
899                                         return NT_STATUS_BUFFER_TOO_SMALL;
900                                 }
901                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
902                         }
903
904                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
905                                 current_pdu_len, *pdata_len, ss_padding_len ));
906
907                         /*
908                          * If this is the first reply, and the allocation hint is reasonable, try to
909                          * set up the return_data parse_struct to the correct size.
910                          */
911
912                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
913                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
914                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
915                                                 "too large to allocate\n",
916                                                 (unsigned int)rhdr_resp.alloc_hint ));
917                                         return NT_STATUS_NO_MEMORY;
918                                 }
919                         }
920
921                         break;
922                 }
923
924                 case RPC_BINDNACK:
925                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
926                                   "received from %s!\n",
927                                   rpccli_pipe_txt(debug_ctx(), cli)));
928                         /* Use this for now... */
929                         return NT_STATUS_NETWORK_ACCESS_DENIED;
930
931                 case RPC_FAULT:
932                 {
933                         RPC_HDR_RESP rhdr_resp;
934                         RPC_HDR_FAULT fault_resp;
935
936                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
937                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
938                                 return NT_STATUS_BUFFER_TOO_SMALL;
939                         }
940
941                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
942                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
943                                 return NT_STATUS_BUFFER_TOO_SMALL;
944                         }
945
946                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
947                                   "code %s received from %s!\n",
948                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
949                                 rpccli_pipe_txt(debug_ctx(), cli)));
950                         if (NT_STATUS_IS_OK(fault_resp.status)) {
951                                 return NT_STATUS_UNSUCCESSFUL;
952                         } else {
953                                 return fault_resp.status;
954                         }
955                 }
956
957                 default:
958                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
959                                 "from %s!\n",
960                                 (unsigned int)prhdr->pkt_type,
961                                 rpccli_pipe_txt(debug_ctx(), cli)));
962                         return NT_STATUS_INVALID_INFO_CLASS;
963         }
964
965         if (prhdr->pkt_type != expected_pkt_type) {
966                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
967                           "got an unexpected RPC packet type - %u, not %u\n",
968                         rpccli_pipe_txt(debug_ctx(), cli),
969                         prhdr->pkt_type,
970                         expected_pkt_type));
971                 return NT_STATUS_INVALID_INFO_CLASS;
972         }
973
974         /* Do this just before return - we don't want to modify any rpc header
975            data before now as we may have needed to do cryptographic actions on
976            it before. */
977
978         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
979                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
980                         "setting fragment first/last ON.\n"));
981                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
982         }
983
984         return NT_STATUS_OK;
985 }
986
987 /****************************************************************************
988  Ensure we eat the just processed pdu from the current_pdu prs_struct.
989  Normally the frag_len and buffer size will match, but on the first trans
990  reply there is a theoretical chance that buffer size > frag_len, so we must
991  deal with that.
992  ****************************************************************************/
993
994 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
995 {
996         uint32 current_pdu_len = prs_data_size(current_pdu);
997
998         if (current_pdu_len < prhdr->frag_len) {
999                 return NT_STATUS_BUFFER_TOO_SMALL;
1000         }
1001
1002         /* Common case. */
1003         if (current_pdu_len == (uint32)prhdr->frag_len) {
1004                 prs_mem_free(current_pdu);
1005                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1006                 /* Make current_pdu dynamic with no memory. */
1007                 prs_give_memory(current_pdu, 0, 0, True);
1008                 return NT_STATUS_OK;
1009         }
1010
1011         /*
1012          * Oh no ! More data in buffer than we processed in current pdu.
1013          * Cheat. Move the data down and shrink the buffer.
1014          */
1015
1016         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1017                         current_pdu_len - prhdr->frag_len);
1018
1019         /* Remember to set the read offset back to zero. */
1020         prs_set_offset(current_pdu, 0);
1021
1022         /* Shrink the buffer. */
1023         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1024                 return NT_STATUS_BUFFER_TOO_SMALL;
1025         }
1026
1027         return NT_STATUS_OK;
1028 }
1029
1030 /****************************************************************************
1031  Call a remote API on an arbitrary pipe. Takes param, data and setup buffers.
1032 ****************************************************************************/
1033
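/*
 * cli_api_pipe_send prefers the transport's trans routine (the SMBtrans
 * based named pipe case) and, for transports without one such as
 * ncacn_ip_tcp, falls back to a plain write followed by a read of
 * RPC_HEADER_LEN bytes.
 */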
1034 struct cli_api_pipe_state {
1035         struct event_context *ev;
1036         struct rpc_cli_transport *transport;
1037         uint8_t *rdata;
1038         uint32_t rdata_len;
1039 };
1040
1041 static void cli_api_pipe_trans_done(struct async_req *subreq);
1042 static void cli_api_pipe_write_done(struct async_req *subreq);
1043 static void cli_api_pipe_read_done(struct async_req *subreq);
1044
1045 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1046                                            struct event_context *ev,
1047                                            struct rpc_cli_transport *transport,
1048                                            uint8_t *data, size_t data_len,
1049                                            uint32_t max_rdata_len)
1050 {
1051         struct async_req *result, *subreq;
1052         struct cli_api_pipe_state *state;
1053         NTSTATUS status;
1054
1055         if (!async_req_setup(mem_ctx, &result, &state,
1056                              struct cli_api_pipe_state)) {
1057                 return NULL;
1058         }
1059         state->ev = ev;
1060         state->transport = transport;
1061
1062         if (max_rdata_len < RPC_HEADER_LEN) {
1063                 /*
1064                  * For a RPC reply we always need at least RPC_HEADER_LEN
1065                  * bytes. We check this here because we will receive
1066                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1067                  */
1068                 status = NT_STATUS_INVALID_PARAMETER;
1069                 goto post_status;
1070         }
1071
1072         if (transport->trans_send != NULL) {
1073                 subreq = transport->trans_send(state, ev, data, data_len,
1074                                                max_rdata_len, transport->priv);
1075                 if (subreq == NULL) {
1076                         status = NT_STATUS_NO_MEMORY;
1077                         goto post_status;
1078                 }
1079                 subreq->async.fn = cli_api_pipe_trans_done;
1080                 subreq->async.priv = result;
1081                 return result;
1082         }
1083
1084         /*
1085          * If the transport does not provide a "trans" routine, i.e. for
1086          * example the ncacn_ip_tcp transport, do the write/read step here.
1087          */
1088
1089         subreq = rpc_write_send(state, ev, transport, data, data_len);
1090         if (subreq == NULL) {
1091                 goto fail;
1092         }
1093         subreq->async.fn = cli_api_pipe_write_done;
1094         subreq->async.priv = result;
1095         return result;
1096
1097         status = NT_STATUS_INVALID_PARAMETER;
1098
1099  post_status:
1100         if (async_post_ntstatus(result, ev, status)) {
1101                 return result;
1102         }
1103  fail:
1104         TALLOC_FREE(result);
1105         return NULL;
1106 }
1107
1108 static void cli_api_pipe_trans_done(struct async_req *subreq)
1109 {
1110         struct async_req *req = talloc_get_type_abort(
1111                 subreq->async.priv, struct async_req);
1112         struct cli_api_pipe_state *state = talloc_get_type_abort(
1113                 req->private_data, struct cli_api_pipe_state);
1114         NTSTATUS status;
1115
1116         status = state->transport->trans_recv(subreq, state, &state->rdata,
1117                                               &state->rdata_len);
1118         TALLOC_FREE(subreq);
1119         if (!NT_STATUS_IS_OK(status)) {
1120                 async_req_nterror(req, status);
1121                 return;
1122         }
1123         async_req_done(req);
1124 }
1125
1126 static void cli_api_pipe_write_done(struct async_req *subreq)
1127 {
1128         struct async_req *req = talloc_get_type_abort(
1129                 subreq->async.priv, struct async_req);
1130         struct cli_api_pipe_state *state = talloc_get_type_abort(
1131                 req->private_data, struct cli_api_pipe_state);
1132         NTSTATUS status;
1133
1134         status = rpc_write_recv(subreq);
1135         TALLOC_FREE(subreq);
1136         if (!NT_STATUS_IS_OK(status)) {
1137                 async_req_nterror(req, status);
1138                 return;
1139         }
1140
1141         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1142         if (async_req_nomem(state->rdata, req)) {
1143                 return;
1144         }
1145
1146         /*
1147          * We don't need to use rpc_read_send here: the upper layer will cope
1148          * with a short read, just as transport->trans_send could also have
1149          * returned less than state->max_rdata_len.
1150          */
1151         subreq = state->transport->read_send(state, state->ev, state->rdata,
1152                                              RPC_HEADER_LEN,
1153                                              state->transport->priv);
1154         if (async_req_nomem(subreq, req)) {
1155                 return;
1156         }
1157         subreq->async.fn = cli_api_pipe_read_done;
1158         subreq->async.priv = req;
1159 }
1160
1161 static void cli_api_pipe_read_done(struct async_req *subreq)
1162 {
1163         struct async_req *req = talloc_get_type_abort(
1164                 subreq->async.priv, struct async_req);
1165         struct cli_api_pipe_state *state = talloc_get_type_abort(
1166                 req->private_data, struct cli_api_pipe_state);
1167         NTSTATUS status;
1168         ssize_t received;
1169
1170         status = state->transport->read_recv(subreq, &received);
1171         TALLOC_FREE(subreq);
1172         if (!NT_STATUS_IS_OK(status)) {
1173                 async_req_nterror(req, status);
1174                 return;
1175         }
1176         state->rdata_len = received;
1177         async_req_done(req);
1178 }
1179
1180 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1181                                   uint8_t **prdata, uint32_t *prdata_len)
1182 {
1183         struct cli_api_pipe_state *state = talloc_get_type_abort(
1184                 req->private_data, struct cli_api_pipe_state);
1185         NTSTATUS status;
1186
1187         if (async_req_is_nterror(req, &status)) {
1188                 return status;
1189         }
1190
1191         *prdata = talloc_move(mem_ctx, &state->rdata);
1192         *prdata_len = state->rdata_len;
1193         return NT_STATUS_OK;
1194 }
1195
1196 /****************************************************************************
1197  Send data on an rpc pipe via trans. The prs_struct data must be the last
1198  pdu fragment of an NDR data stream.
1199
1200  Receive response data from an rpc pipe, which may be large...
1201
1202  Read the first fragment: unfortunately we have to use SMBtrans for the first
1203  bit, then SMBreadX for subsequent bits.
1204
1205  If the first fragment received also wasn't the last fragment, continue
1206  getting fragments until we _do_ receive the last fragment.
1207
1208  Request/Response PDUs look like the following...
1209
1210  |<------------------PDU len----------------------------------------------->|
1211  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1212
1213  +------------+-----------------+-------------+---------------+-------------+
1214  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1215  +------------+-----------------+-------------+---------------+-------------+
1216
1217  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1218  signing & sealing being negotiated.
1219
1220  ****************************************************************************/
1221
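/*
 * A worked example of the offsets implied by the diagram above, assuming
 * the usual DCE/RPC sizes (RPC_HEADER_LEN = 16, RPC_HDR_RESP_LEN = 8,
 * RPC_HDR_AUTH_LEN = 8): the NDR payload of a response PDU starts at
 * byte 24 and, when signing/sealing is negotiated, ends at
 * frag_len - 8 - auth_len - auth_pad_len.
 */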
1222 struct rpc_api_pipe_state {
1223         struct event_context *ev;
1224         struct rpc_pipe_client *cli;
1225         uint8_t expected_pkt_type;
1226
1227         prs_struct incoming_frag;
1228         struct rpc_hdr_info rhdr;
1229
1230         prs_struct incoming_pdu;        /* Incoming reply */
1231         uint32_t incoming_pdu_offset;
1232 };
1233
1234 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1235 {
1236         prs_mem_free(&state->incoming_frag);
1237         prs_mem_free(&state->incoming_pdu);
1238         return 0;
1239 }
1240
1241 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1242 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1243
1244 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1245                                            struct event_context *ev,
1246                                            struct rpc_pipe_client *cli,
1247                                            prs_struct *data, /* Outgoing PDU */
1248                                            uint8_t expected_pkt_type)
1249 {
1250         struct async_req *result, *subreq;
1251         struct rpc_api_pipe_state *state;
1252         uint16_t max_recv_frag;
1253         NTSTATUS status;
1254
1255         if (!async_req_setup(mem_ctx, &result, &state,
1256                              struct rpc_api_pipe_state)) {
1257                 return NULL;
1258         }
1259         state->ev = ev;
1260         state->cli = cli;
1261         state->expected_pkt_type = expected_pkt_type;
1262         state->incoming_pdu_offset = 0;
1263
1264         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1265
1266         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1267         /* Make incoming_pdu dynamic with no memory. */
1268         prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1269
1270         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1271
1272         /*
1273          * Ensure we're not sending too much.
1274          */
1275         if (prs_offset(data) > cli->max_xmit_frag) {
1276                 status = NT_STATUS_INVALID_PARAMETER;
1277                 goto post_status;
1278         }
1279
1280         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1281
1282         max_recv_frag = cli->max_recv_frag;
1283
1284 #if 0
1285         max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1286 #endif
1287
1288         subreq = cli_api_pipe_send(state, ev, cli->transport,
1289                                    (uint8_t *)prs_data_p(data),
1290                                    prs_offset(data), max_recv_frag);
1291         if (subreq == NULL) {
1292                 status = NT_STATUS_NO_MEMORY;
1293                 goto post_status;
1294         }
1295         subreq->async.fn = rpc_api_pipe_trans_done;
1296         subreq->async.priv = result;
1297         return result;
1298
1299  post_status:
1300         if (async_post_ntstatus(result, ev, status)) {
1301                 return result;
1302         }
1303         TALLOC_FREE(result);
1304         return NULL;
1305 }
1306
1307 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1308 {
1309         struct async_req *req = talloc_get_type_abort(
1310                 subreq->async.priv, struct async_req);
1311         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1312                 req->private_data, struct rpc_api_pipe_state);
1313         NTSTATUS status;
1314         uint8_t *rdata = NULL;
1315         uint32_t rdata_len = 0;
1316         char *rdata_copy;
1317
1318         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1319         TALLOC_FREE(subreq);
1320         if (!NT_STATUS_IS_OK(status)) {
1321                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1322                 async_req_nterror(req, status);
1323                 return;
1324         }
1325
1326         if (rdata == NULL) {
1327                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1328                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1329                 async_req_done(req);
1330                 return;
1331         }
1332
1333         /*
1334          * Give the memory received from cli_trans as dynamic to the current
1335          * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1336          * :-(
1337          */
1338         rdata_copy = (char *)memdup(rdata, rdata_len);
1339         TALLOC_FREE(rdata);
1340         if (async_req_nomem(rdata_copy, req)) {
1341                 return;
1342         }
1343         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1344
1345         /* Ensure we have enough data for a pdu. */
1346         subreq = get_complete_frag_send(state, state->ev, state->cli,
1347                                         &state->rhdr, &state->incoming_frag);
1348         if (async_req_nomem(subreq, req)) {
1349                 return;
1350         }
1351         subreq->async.fn = rpc_api_pipe_got_pdu;
1352         subreq->async.priv = req;
1353 }
1354
1355 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1356 {
1357         struct async_req *req = talloc_get_type_abort(
1358                 subreq->async.priv, struct async_req);
1359         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1360                 req->private_data, struct rpc_api_pipe_state);
1361         NTSTATUS status;
1362         char *rdata = NULL;
1363         uint32_t rdata_len = 0;
1364
1365         status = get_complete_frag_recv(subreq);
1366         TALLOC_FREE(subreq);
1367         if (!NT_STATUS_IS_OK(status)) {
1368                 DEBUG(5, ("get_complete_frag failed: %s\n",
1369                           nt_errstr(status)));
1370                 async_req_nterror(req, status);
1371                 return;
1372         }
1373
1374         status = cli_pipe_validate_current_pdu(
1375                 state->cli, &state->rhdr, &state->incoming_frag,
1376                 state->expected_pkt_type, &rdata, &rdata_len,
1377                 &state->incoming_pdu);
1378
1379         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1380                   (unsigned)prs_data_size(&state->incoming_frag),
1381                   (unsigned)state->incoming_pdu_offset,
1382                   nt_errstr(status)));
1383
1384         if (!NT_STATUS_IS_OK(status)) {
1385                 async_req_nterror(req, status);
1386                 return;
1387         }
1388
1389         if ((state->rhdr.flags & RPC_FLG_FIRST)
1390             && (state->rhdr.pack_type[0] == 0)) {
1391                 /*
1392                  * Set the data type correctly for big-endian data on the
1393                  * first packet.
1394                  */
1395                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1396                           "big-endian.\n",
1397                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1398                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1399         }
1400         /*
1401          * Check endianness on subsequent packets.
1402          */
1403         if (state->incoming_frag.bigendian_data
1404             != state->incoming_pdu.bigendian_data) {
1405                 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1406                          "%s\n",
1407                          state->incoming_pdu.bigendian_data?"big":"little",
1408                          state->incoming_frag.bigendian_data?"big":"little"));
1409                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1410                 return;
1411         }
1412
1413         /* Now copy the data portion out of the pdu into rbuf. */
1414         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1415                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
1416                 return;
1417         }
1418
1419         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1420                rdata, (size_t)rdata_len);
1421         state->incoming_pdu_offset += rdata_len;
1422
1423         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1424                                             &state->incoming_frag);
1425         if (!NT_STATUS_IS_OK(status)) {
1426                 async_req_nterror(req, status);
1427                 return;
1428         }
1429
1430         if (state->rhdr.flags & RPC_FLG_LAST) {
1431                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1432                           rpccli_pipe_txt(debug_ctx(), state->cli),
1433                           (unsigned)prs_data_size(&state->incoming_pdu)));
1434                 async_req_done(req);
1435                 return;
1436         }
1437
1438         subreq = get_complete_frag_send(state, state->ev, state->cli,
1439                                         &state->rhdr, &state->incoming_frag);
1440         if (async_req_nomem(subreq, req)) {
1441                 return;
1442         }
1443         subreq->async.fn = rpc_api_pipe_got_pdu;
1444         subreq->async.priv = req;
1445 }
1446
1447 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1448                                   prs_struct *reply_pdu)
1449 {
1450         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1451                 req->private_data, struct rpc_api_pipe_state);
1452         NTSTATUS status;
1453
1454         if (async_req_is_nterror(req, &status)) {
1455                 return status;
1456         }
1457
1458         *reply_pdu = state->incoming_pdu;
1459         reply_pdu->mem_ctx = mem_ctx;
1460
1461         /*
1462          * Prevent state->incoming_pdu from being freed in
1463          * rpc_api_pipe_state_destructor()
1464          */
1465         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1466
1467         return NT_STATUS_OK;
1468 }
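
#if 0
/*
 * Illustrative sketch (not compiled): driving the asynchronous
 * rpc_api_pipe_send/_recv pair to completion with a throw-away event
 * context, in the same style as rpc_api_pipe_req() further down.
 * The example_* name is hypothetical; "cli" is assumed to be a bound
 * pipe client and "data" a fully marshalled request fragment.
 */
static NTSTATUS example_rpc_api_pipe_sync(TALLOC_CTX *mem_ctx,
                                          struct rpc_pipe_client *cli,
                                          prs_struct *data,
                                          prs_struct *reply_pdu)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct event_context *ev;
        struct async_req *req;
        NTSTATUS status = NT_STATUS_NO_MEMORY;

        ev = event_context_init(frame);
        if (ev == NULL) {
                goto fail;
        }

        req = rpc_api_pipe_send(frame, ev, cli, data, RPC_RESPONSE);
        if (req == NULL) {
                goto fail;
        }

        while (req->state < ASYNC_REQ_DONE) {
                event_loop_once(ev);
        }

        status = rpc_api_pipe_recv(req, mem_ctx, reply_pdu);
 fail:
        TALLOC_FREE(frame);
        return status;
}
#endif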
1469
1470 /*******************************************************************
1471  Creates krb5 auth bind.
1472  ********************************************************************/
1473
1474 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1475                                                 enum pipe_auth_level auth_level,
1476                                                 RPC_HDR_AUTH *pauth_out,
1477                                                 prs_struct *auth_data)
1478 {
1479 #ifdef HAVE_KRB5
1480         int ret;
1481         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1482         DATA_BLOB tkt = data_blob_null;
1483         DATA_BLOB tkt_wrapped = data_blob_null;
1484
1485         /* We may change the pad length before marshalling. */
1486         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1487
1488         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1489                 a->service_principal ));
1490
1491         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1492
1493         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1494                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1495
1496         if (ret) {
1497                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1498                         "failed with %s\n",
1499                         a->service_principal,
1500                         error_message(ret) ));
1501
1502                 data_blob_free(&tkt);
1503                 prs_mem_free(auth_data);
1504                 return NT_STATUS_INVALID_PARAMETER;
1505         }
1506
1507         /* wrap that up in a nice GSS-API wrapping */
1508         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1509
1510         data_blob_free(&tkt);
1511
1512         /* Auth len in the rpc header doesn't include auth_header. */
1513         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1514                 data_blob_free(&tkt_wrapped);
1515                 prs_mem_free(auth_data);
1516                 return NT_STATUS_NO_MEMORY;
1517         }
1518
1519         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1520         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1521
1522         data_blob_free(&tkt_wrapped);
1523         return NT_STATUS_OK;
1524 #else
1525         return NT_STATUS_INVALID_PARAMETER;
1526 #endif
1527 }
1528
1529 /*******************************************************************
1530  Creates SPNEGO NTLMSSP auth bind.
1531  ********************************************************************/
1532
1533 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1534                                                 enum pipe_auth_level auth_level,
1535                                                 RPC_HDR_AUTH *pauth_out,
1536                                                 prs_struct *auth_data)
1537 {
1538         NTSTATUS nt_status;
1539         DATA_BLOB null_blob = data_blob_null;
1540         DATA_BLOB request = data_blob_null;
1541         DATA_BLOB spnego_msg = data_blob_null;
1542
1543         /* We may change the pad length before marshalling. */
1544         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1545
1546         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1547         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1548                                         null_blob,
1549                                         &request);
1550
1551         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1552                 data_blob_free(&request);
1553                 prs_mem_free(auth_data);
1554                 return nt_status;
1555         }
1556
1557         /* Wrap this in SPNEGO. */
1558         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1559
1560         data_blob_free(&request);
1561
1562         /* Auth len in the rpc header doesn't include auth_header. */
1563         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1564                 data_blob_free(&spnego_msg);
1565                 prs_mem_free(auth_data);
1566                 return NT_STATUS_NO_MEMORY;
1567         }
1568
1569         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1570         dump_data(5, spnego_msg.data, spnego_msg.length);
1571
1572         data_blob_free(&spnego_msg);
1573         return NT_STATUS_OK;
1574 }
1575
1576 /*******************************************************************
1577  Creates NTLMSSP auth bind.
1578  ********************************************************************/
1579
1580 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1581                                                 enum pipe_auth_level auth_level,
1582                                                 RPC_HDR_AUTH *pauth_out,
1583                                                 prs_struct *auth_data)
1584 {
1585         NTSTATUS nt_status;
1586         DATA_BLOB null_blob = data_blob_null;
1587         DATA_BLOB request = data_blob_null;
1588
1589         /* We may change the pad length before marshalling. */
1590         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1591
1592         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1593         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1594                                         null_blob,
1595                                         &request);
1596
1597         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1598                 data_blob_free(&request);
1599                 prs_mem_free(auth_data);
1600                 return nt_status;
1601         }
1602
1603         /* Auth len in the rpc header doesn't include auth_header. */
1604         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1605                 data_blob_free(&request);
1606                 prs_mem_free(auth_data);
1607                 return NT_STATUS_NO_MEMORY;
1608         }
1609
1610         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1611         dump_data(5, request.data, request.length);
1612
1613         data_blob_free(&request);
1614         return NT_STATUS_OK;
1615 }
1616
1617 /*******************************************************************
1618  Creates schannel auth bind.
1619  ********************************************************************/
1620
1621 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1622                                                 enum pipe_auth_level auth_level,
1623                                                 RPC_HDR_AUTH *pauth_out,
1624                                                 prs_struct *auth_data)
1625 {
1626         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1627
1628         /* We may change the pad length before marshalling. */
1629         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1630
1631         /* Use lp_workgroup() if domain not specified */
1632
1633         if (!cli->auth->domain || !cli->auth->domain[0]) {
1634                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1635                 if (cli->auth->domain == NULL) {
1636                         return NT_STATUS_NO_MEMORY;
1637                 }
1638         }
1639
1640         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1641                                    global_myname());
1642
1643         /*
1644          * Now marshall the data into the auth parse_struct.
1645          */
1646
1647         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1648                                        &schannel_neg, auth_data, 0)) {
1649                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1650                 prs_mem_free(auth_data);
1651                 return NT_STATUS_NO_MEMORY;
1652         }
1653
1654         return NT_STATUS_OK;
1655 }
1656
1657 /*******************************************************************
1658  Creates the internals of a DCE/RPC bind request or alter context PDU.
1659  ********************************************************************/
1660
1661 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1662                                                 prs_struct *rpc_out, 
1663                                                 uint32 rpc_call_id,
1664                                                 const RPC_IFACE *abstract,
1665                                                 const RPC_IFACE *transfer,
1666                                                 RPC_HDR_AUTH *phdr_auth,
1667                                                 prs_struct *pauth_info)
1668 {
1669         RPC_HDR hdr;
1670         RPC_HDR_RB hdr_rb;
1671         RPC_CONTEXT rpc_ctx;
1672         uint16 auth_len = prs_offset(pauth_info);
1673         uint8 ss_padding_len = 0;
1674         uint16 frag_len = 0;
1675
1676         /* create the RPC context. */
1677         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1678
1679         /* create the bind request RPC_HDR_RB */
1680         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1681
1682         /* Start building the frag length. */
1683         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1684
1685         /* Do we need to pad ? */
1686         if (auth_len) {
1687                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1688                 if (data_len % 8) {
1689                         ss_padding_len = 8 - (data_len % 8);
1690                         phdr_auth->auth_pad_len = ss_padding_len;
1691                 }
1692                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1693         }
1694
1695         /* Create the request RPC_HDR */
1696         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1697
1698         /* Marshall the RPC header */
1699         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1700                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1701                 return NT_STATUS_NO_MEMORY;
1702         }
1703
1704         /* Marshall the bind request data */
1705         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1706                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1707                 return NT_STATUS_NO_MEMORY;
1708         }
1709
1710         /*
1711          * Grow the outgoing buffer to store any auth info.
1712          */
1713
1714         if(auth_len != 0) {
1715                 if (ss_padding_len) {
1716                         char pad[8];
1717                         memset(pad, '\0', 8);
1718                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1719                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1720                                 return NT_STATUS_NO_MEMORY;
1721                         }
1722                 }
1723
1724                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1725                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1726                         return NT_STATUS_NO_MEMORY;
1727                 }
1728
1729
1730                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1731                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1732                         return NT_STATUS_NO_MEMORY;
1733                 }
1734         }
1735
1736         return NT_STATUS_OK;
1737 }
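
/*
 * For reference, the PDU assembled above is laid out as (illustrative):
 *
 *   [RPC_HDR][RPC_HDR_RB (incl. context)][ss padding][RPC_HDR_AUTH][auth data]
 *
 * The padding and the two trailing elements are only present for
 * authenticated binds (auth_len != 0).
 */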
1738
1739 /*******************************************************************
1740  Creates a DCE/RPC bind request.
1741  ********************************************************************/
1742
1743 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1744                                 prs_struct *rpc_out, 
1745                                 uint32 rpc_call_id,
1746                                 const RPC_IFACE *abstract,
1747                                 const RPC_IFACE *transfer,
1748                                 enum pipe_auth_type auth_type,
1749                                 enum pipe_auth_level auth_level)
1750 {
1751         RPC_HDR_AUTH hdr_auth;
1752         prs_struct auth_info;
1753         NTSTATUS ret = NT_STATUS_OK;
1754
1755         ZERO_STRUCT(hdr_auth);
1756         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1757                 return NT_STATUS_NO_MEMORY;
1758
1759         switch (auth_type) {
1760                 case PIPE_AUTH_TYPE_SCHANNEL:
1761                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1762                         if (!NT_STATUS_IS_OK(ret)) {
1763                                 prs_mem_free(&auth_info);
1764                                 return ret;
1765                         }
1766                         break;
1767
1768                 case PIPE_AUTH_TYPE_NTLMSSP:
1769                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1770                         if (!NT_STATUS_IS_OK(ret)) {
1771                                 prs_mem_free(&auth_info);
1772                                 return ret;
1773                         }
1774                         break;
1775
1776                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1777                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1778                         if (!NT_STATUS_IS_OK(ret)) {
1779                                 prs_mem_free(&auth_info);
1780                                 return ret;
1781                         }
1782                         break;
1783
1784                 case PIPE_AUTH_TYPE_KRB5:
1785                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1786                         if (!NT_STATUS_IS_OK(ret)) {
1787                                 prs_mem_free(&auth_info);
1788                                 return ret;
1789                         }
1790                         break;
1791
1792                 case PIPE_AUTH_TYPE_NONE:
1793                         break;
1794
1795                 default:
1796                         /* "Can't" happen. */
1797                         return NT_STATUS_INVALID_INFO_CLASS;
1798         }
1799
1800         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1801                                                 rpc_out, 
1802                                                 rpc_call_id,
1803                                                 abstract,
1804                                                 transfer,
1805                                                 &hdr_auth,
1806                                                 &auth_info);
1807
1808         prs_mem_free(&auth_info);
1809         return ret;
1810 }
1811
1812 /*******************************************************************
1813  Create and add the NTLMSSP sign/seal auth header and data.
1814  ********************************************************************/
1815
1816 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1817                                         RPC_HDR *phdr,
1818                                         uint32 ss_padding_len,
1819                                         prs_struct *outgoing_pdu)
1820 {
1821         RPC_HDR_AUTH auth_info;
1822         NTSTATUS status;
1823         DATA_BLOB auth_blob = data_blob_null;
1824         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1825
1826         if (!cli->auth->a_u.ntlmssp_state) {
1827                 return NT_STATUS_INVALID_PARAMETER;
1828         }
1829
1830         /* Init and marshall the auth header. */
1831         init_rpc_hdr_auth(&auth_info,
1832                         map_pipe_auth_type_to_rpc_auth_type(
1833                                 cli->auth->auth_type),
1834                         cli->auth->auth_level,
1835                         ss_padding_len,
1836                         1 /* context id. */);
1837
1838         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1839                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1840                 data_blob_free(&auth_blob);
1841                 return NT_STATUS_NO_MEMORY;
1842         }
1843
1844         switch (cli->auth->auth_level) {
1845                 case PIPE_AUTH_LEVEL_PRIVACY:
1846                         /* Data portion is encrypted. */
1847                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1848                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1849                                         data_and_pad_len,
1850                                         (unsigned char *)prs_data_p(outgoing_pdu),
1851                                         (size_t)prs_offset(outgoing_pdu),
1852                                         &auth_blob);
1853                         if (!NT_STATUS_IS_OK(status)) {
1854                                 data_blob_free(&auth_blob);
1855                                 return status;
1856                         }
1857                         break;
1858
1859                 case PIPE_AUTH_LEVEL_INTEGRITY:
1860                         /* Data is signed. */
1861                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1862                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1863                                         data_and_pad_len,
1864                                         (unsigned char *)prs_data_p(outgoing_pdu),
1865                                         (size_t)prs_offset(outgoing_pdu),
1866                                         &auth_blob);
1867                         if (!NT_STATUS_IS_OK(status)) {
1868                                 data_blob_free(&auth_blob);
1869                                 return status;
1870                         }
1871                         break;
1872
1873                 default:
1874                         /* Can't happen. */
1875                         smb_panic("bad auth level");
1876                         /* Notreached. */
1877                         return NT_STATUS_INVALID_PARAMETER;
1878         }
1879
1880         /* Finally marshall the blob. */
1881
1882         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1883                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1884                         (unsigned int)NTLMSSP_SIG_SIZE));
1885                 data_blob_free(&auth_blob);
1886                 return NT_STATUS_NO_MEMORY;
1887         }
1888
1889         data_blob_free(&auth_blob);
1890         return NT_STATUS_OK;
1891 }
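
/*
 * For reference, the authenticated request PDU now ends with
 * (illustrative layout):
 *
 *   [stub data][ss padding][RPC_HDR_AUTH][NTLMSSP signature, NTLMSSP_SIG_SIZE bytes]
 *
 * With PIPE_AUTH_LEVEL_PRIVACY the stub data and padding have been sealed
 * in place above; with PIPE_AUTH_LEVEL_INTEGRITY they are only signed.
 */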
1892
1893 /*******************************************************************
1894  Create and add the schannel sign/seal auth header and data.
1895  ********************************************************************/
1896
1897 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1898                                         RPC_HDR *phdr,
1899                                         uint32 ss_padding_len,
1900                                         prs_struct *outgoing_pdu)
1901 {
1902         RPC_HDR_AUTH auth_info;
1903         RPC_AUTH_SCHANNEL_CHK verf;
1904         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1905         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1906         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1907
1908         if (!sas) {
1909                 return NT_STATUS_INVALID_PARAMETER;
1910         }
1911
1912         /* Init and marshall the auth header. */
1913         init_rpc_hdr_auth(&auth_info,
1914                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1915                         cli->auth->auth_level,
1916                         ss_padding_len,
1917                         1 /* context id. */);
1918
1919         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1920                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1921                 return NT_STATUS_NO_MEMORY;
1922         }
1923
1924         switch (cli->auth->auth_level) {
1925                 case PIPE_AUTH_LEVEL_PRIVACY:
1926                 case PIPE_AUTH_LEVEL_INTEGRITY:
1927                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1928                                 sas->seq_num));
1929
1930                         schannel_encode(sas,
1931                                         cli->auth->auth_level,
1932                                         SENDER_IS_INITIATOR,
1933                                         &verf,
1934                                         data_p,
1935                                         data_and_pad_len);
1936
1937                         sas->seq_num++;
1938                         break;
1939
1940                 default:
1941                         /* Can't happen. */
1942                         smb_panic("bad auth level");
1943                         /* Notreached. */
1944                         return NT_STATUS_INVALID_PARAMETER;
1945         }
1946
1947         /* Finally marshall the blob. */
1948         smb_io_rpc_auth_schannel_chk("",
1949                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1950                         &verf,
1951                         outgoing_pdu,
1952                         0);
1953
1954         return NT_STATUS_OK;
1955 }
1956
1957 /*******************************************************************
1958  Calculate how much data we're going to send in this packet, also
1959  work out any sign/seal padding length.
1960  ********************************************************************/
1961
1962 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1963                                         uint32 data_left,
1964                                         uint16 *p_frag_len,
1965                                         uint16 *p_auth_len,
1966                                         uint32 *p_ss_padding)
1967 {
1968         uint32 data_space, data_len;
1969
1970 #if 0
1971         if ((data_left > 0) && (sys_random() % 2)) {
1972                 data_left = MAX(data_left/2, 1);
1973         }
1974 #endif
1975
1976         switch (cli->auth->auth_level) {
1977                 case PIPE_AUTH_LEVEL_NONE:
1978                 case PIPE_AUTH_LEVEL_CONNECT:
1979                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1980                         data_len = MIN(data_space, data_left);
1981                         *p_ss_padding = 0;
1982                         *p_auth_len = 0;
1983                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1984                         return data_len;
1985
1986                 case PIPE_AUTH_LEVEL_INTEGRITY:
1987                 case PIPE_AUTH_LEVEL_PRIVACY:
1988                         /* Treat the same for all authenticated rpc requests. */
1989                         switch(cli->auth->auth_type) {
1990                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1991                                 case PIPE_AUTH_TYPE_NTLMSSP:
1992                                         *p_auth_len = NTLMSSP_SIG_SIZE;
1993                                         break;
1994                                 case PIPE_AUTH_TYPE_SCHANNEL:
1995                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1996                                         break;
1997                                 default:
1998                                         smb_panic("bad auth type");
1999                                         break;
2000                         }
2001
2002                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2003                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2004
2005                         data_len = MIN(data_space, data_left);
2006                         *p_ss_padding = 0;
2007                         if (data_len % 8) {
2008                                 *p_ss_padding = 8 - (data_len % 8);
2009                         }
2010                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2011                                         data_len + *p_ss_padding +              /* data plus padding. */
2012                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2013                         return data_len;
2014
2015                 default:
2016                         smb_panic("bad auth level");
2017                         /* Notreached. */
2018                         return 0;
2019         }
2020 }
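
/*
 * Worked example (illustrative figures, assuming RPC_HEADER_LEN == 16,
 * RPC_HDR_REQ_LEN == 8, RPC_HDR_AUTH_LEN == 8 and NTLMSSP_SIG_SIZE == 16):
 * with max_xmit_frag == 4280 and NTLMSSP sign/seal,
 *
 *   data_space = 4280 - 16 - 8 - 8 - 16 = 4232
 *
 * so a 10000 byte request yields data_len == 4232, ss_padding == 0 and
 * frag_len == 16 + 8 + 4232 + 0 + 8 + 16 == 4280 for the first fragment.
 */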
2021
2022 /*******************************************************************
2023  External interface.
2024  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2025  Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2026  and deals with signing/sealing details.
2027  ********************************************************************/
2028
2029 struct rpc_api_pipe_req_state {
2030         struct event_context *ev;
2031         struct rpc_pipe_client *cli;
2032         uint8_t op_num;
2033         uint32_t call_id;
2034         prs_struct *req_data;
2035         uint32_t req_data_sent;
2036         prs_struct outgoing_frag;
2037         prs_struct reply_pdu;
2038 };
2039
2040 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2041 {
2042         prs_mem_free(&s->outgoing_frag);
2043         prs_mem_free(&s->reply_pdu);
2044         return 0;
2045 }
2046
2047 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2048 static void rpc_api_pipe_req_done(struct async_req *subreq);
2049 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2050                                   bool *is_last_frag);
2051
2052 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2053                                         struct event_context *ev,
2054                                         struct rpc_pipe_client *cli,
2055                                         uint8_t op_num,
2056                                         prs_struct *req_data)
2057 {
2058         struct async_req *result, *subreq;
2059         struct rpc_api_pipe_req_state *state;
2060         NTSTATUS status;
2061         bool is_last_frag;
2062
2063         if (!async_req_setup(mem_ctx, &result, &state,
2064                              struct rpc_api_pipe_req_state)) {
2065                 return NULL;
2066         }
2067         state->ev = ev;
2068         state->cli = cli;
2069         state->op_num = op_num;
2070         state->req_data = req_data;
2071         state->req_data_sent = 0;
2072         state->call_id = get_rpc_call_id();
2073
2074         if (cli->max_xmit_frag
2075             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2076                 /* Server is screwed up ! */
2077                 status = NT_STATUS_INVALID_PARAMETER;
2078                 goto post_status;
2079         }
2080
2081         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2082
2083         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2084                       state, MARSHALL)) {
2085                 status = NT_STATUS_NO_MEMORY;
2086                 goto post_status;
2087         }
2088
2089         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2090
2091         status = prepare_next_frag(state, &is_last_frag);
2092         if (!NT_STATUS_IS_OK(status)) {
2093                 goto post_status;
2094         }
2095
2096         if (is_last_frag) {
2097                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2098                                            &state->outgoing_frag,
2099                                            RPC_RESPONSE);
2100                 if (subreq == NULL) {
2101                         status = NT_STATUS_NO_MEMORY;
2102                         goto post_status;
2103                 }
2104                 subreq->async.fn = rpc_api_pipe_req_done;
2105                 subreq->async.priv = result;
2106         } else {
2107                 subreq = rpc_write_send(
2108                         state, ev, cli->transport,
2109                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2110                         prs_offset(&state->outgoing_frag));
2111                 if (subreq == NULL) {
2112                         status = NT_STATUS_NO_MEMORY;
2113                         goto post_status;
2114                 }
2115                 subreq->async.fn = rpc_api_pipe_req_write_done;
2116                 subreq->async.priv = result;
2117         }
2118         return result;
2119
2120  post_status:
2121         if (async_post_ntstatus(result, ev, status)) {
2122                 return result;
2123         }
2124         TALLOC_FREE(result);
2125         return NULL;
2126 }
2127
2128 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2129                                   bool *is_last_frag)
2130 {
2131         RPC_HDR hdr;
2132         RPC_HDR_REQ hdr_req;
2133         uint32_t data_sent_thistime;
2134         uint16_t auth_len;
2135         uint16_t frag_len;
2136         uint8_t flags = 0;
2137         uint32_t ss_padding;
2138         uint32_t data_left;
2139         char pad[8] = { 0, };
2140         NTSTATUS status;
2141
2142         data_left = prs_offset(state->req_data) - state->req_data_sent;
2143
2144         data_sent_thistime = calculate_data_len_tosend(
2145                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2146
2147         if (state->req_data_sent == 0) {
2148                 flags = RPC_FLG_FIRST;
2149         }
2150
2151         if (data_sent_thistime == data_left) {
2152                 flags |= RPC_FLG_LAST;
2153         }
2154
2155         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2156                 return NT_STATUS_NO_MEMORY;
2157         }
2158
2159         /* Create and marshall the header and request header. */
2160         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2161                      auth_len);
2162
2163         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2164                 return NT_STATUS_NO_MEMORY;
2165         }
2166
2167         /* Create the rpc request RPC_HDR_REQ */
2168         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2169                          state->op_num);
2170
2171         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2172                                 &state->outgoing_frag, 0)) {
2173                 return NT_STATUS_NO_MEMORY;
2174         }
2175
2176         /* Copy in the data, plus any ss padding. */
2177         if (!prs_append_some_prs_data(&state->outgoing_frag,
2178                                       state->req_data, state->req_data_sent,
2179                                       data_sent_thistime)) {
2180                 return NT_STATUS_NO_MEMORY;
2181         }
2182
2183         /* Copy the sign/seal padding data. */
2184         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2185                 return NT_STATUS_NO_MEMORY;
2186         }
2187
2188         /* Generate any auth sign/seal and add the auth footer. */
2189         switch (state->cli->auth->auth_type) {
2190         case PIPE_AUTH_TYPE_NONE:
2191                 status = NT_STATUS_OK;
2192                 break;
2193         case PIPE_AUTH_TYPE_NTLMSSP:
2194         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2195                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2196                                                  &state->outgoing_frag);
2197                 break;
2198         case PIPE_AUTH_TYPE_SCHANNEL:
2199                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2200                                                   &state->outgoing_frag);
2201                 break;
2202         default:
2203                 status = NT_STATUS_INVALID_PARAMETER;
2204                 break;
2205         }
2206
2207         state->req_data_sent += data_sent_thistime;
2208         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2209
2210         return status;
2211 }
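
/*
 * Illustrative fragment sequence: a request larger than one fragment is
 * sent as RPC_REQUEST PDUs carrying RPC_FLG_FIRST on the first fragment,
 * no flags on middle fragments and RPC_FLG_LAST on the final one; a
 * request that fits in a single fragment carries both flags at once.
 */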
2212
2213 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2214 {
2215         struct async_req *req = talloc_get_type_abort(
2216                 subreq->async.priv, struct async_req);
2217         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2218                 req->private_data, struct rpc_api_pipe_req_state);
2219         NTSTATUS status;
2220         bool is_last_frag;
2221
2222         status = rpc_write_recv(subreq);
2223         TALLOC_FREE(subreq);
2224         if (!NT_STATUS_IS_OK(status)) {
2225                 async_req_nterror(req, status);
2226                 return;
2227         }
2228
2229         status = prepare_next_frag(state, &is_last_frag);
2230         if (!NT_STATUS_IS_OK(status)) {
2231                 async_req_nterror(req, status);
2232                 return;
2233         }
2234
2235         if (is_last_frag) {
2236                 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2237                                            &state->outgoing_frag,
2238                                            RPC_RESPONSE);
2239                 if (async_req_nomem(subreq, req)) {
2240                         return;
2241                 }
2242                 subreq->async.fn = rpc_api_pipe_req_done;
2243                 subreq->async.priv = req;
2244         } else {
2245                 subreq = rpc_write_send(
2246                         state, state->ev,
2247                         state->cli->transport,
2248                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2249                         prs_offset(&state->outgoing_frag));
2250                 if (async_req_nomem(subreq, req)) {
2251                         return;
2252                 }
2253                 subreq->async.fn = rpc_api_pipe_req_write_done;
2254                 subreq->async.priv = req;
2255         }
2256 }
2257
2258 static void rpc_api_pipe_req_done(struct async_req *subreq)
2259 {
2260         struct async_req *req = talloc_get_type_abort(
2261                 subreq->async.priv, struct async_req);
2262         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2263                 req->private_data, struct rpc_api_pipe_req_state);
2264         NTSTATUS status;
2265
2266         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2267         TALLOC_FREE(subreq);
2268         if (!NT_STATUS_IS_OK(status)) {
2269                 async_req_nterror(req, status);
2270                 return;
2271         }
2272         async_req_done(req);
2273 }
2274
2275 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2276                                prs_struct *reply_pdu)
2277 {
2278         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2279                 req->private_data, struct rpc_api_pipe_req_state);
2280         NTSTATUS status;
2281
2282         if (async_req_is_nterror(req, &status)) {
2283                 /*
2284                  * We always have to initialize the reply pdu, even if there is
2285                  * none. The rpccli_* caller routines expect this.
2286                  */
2287                 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2288                 return status;
2289         }
2290
2291         *reply_pdu = state->reply_pdu;
2292         reply_pdu->mem_ctx = mem_ctx;
2293
2294         /*
2295          * Prevent state->reply_pdu from being freed in
2296          * rpc_api_pipe_req_state_destructor()
2297          */
2298         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2299
2300         return NT_STATUS_OK;
2301 }
2302
2303 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2304                         uint8 op_num,
2305                         prs_struct *in_data,
2306                         prs_struct *out_data)
2307 {
2308         TALLOC_CTX *frame = talloc_stackframe();
2309         struct event_context *ev;
2310         struct async_req *req;
2311         NTSTATUS status = NT_STATUS_NO_MEMORY;
2312
2313         ev = event_context_init(frame);
2314         if (ev == NULL) {
2315                 goto fail;
2316         }
2317
2318         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2319         if (req == NULL) {
2320                 goto fail;
2321         }
2322
2323         while (req->state < ASYNC_REQ_DONE) {
2324                 event_loop_once(ev);
2325         }
2326
2327         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2328  fail:
2329         TALLOC_FREE(frame);
2330         return status;
2331 }
2332
2333 #if 0
2334 /****************************************************************************
2335  Set the handle state.
2336 ****************************************************************************/
2337
2338 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2339                                    const char *pipe_name, uint16 device_state)
2340 {
2341         bool state_set = False;
2342         char param[2];
2343         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2344         char *rparam = NULL;
2345         char *rdata = NULL;
2346         uint32 rparam_len, rdata_len;
2347
2348         if (pipe_name == NULL)
2349                 return False;
2350
2351         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2352                  cli->fnum, pipe_name, device_state));
2353
2354         /* create parameters: device state */
2355         SSVAL(param, 0, device_state);
2356
2357         /* create setup parameters. */
2358         setup[0] = 0x0001; 
2359         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2360
2361         /* send the data on \PIPE\ */
2362         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2363                     setup, 2, 0,                /* setup, length, max */
2364                     param, 2, 0,                /* param, length, max */
2365                     NULL, 0, 1024,              /* data, length, max */
2366                     &rparam, &rparam_len,        /* return param, length */
2367                     &rdata, &rdata_len))         /* return data, length */
2368         {
2369                 DEBUG(5, ("Set Handle state: return OK\n"));
2370                 state_set = True;
2371         }
2372
2373         SAFE_FREE(rparam);
2374         SAFE_FREE(rdata);
2375
2376         return state_set;
2377 }
2378 #endif
2379
2380 /****************************************************************************
2381  Check the rpc bind acknowledge response.
2382 ****************************************************************************/
2383
2384 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2385 {
2386         if ( hdr_ba->addr.len == 0) {
2387                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2388         }
2389
2390         /* check the transfer syntax */
2391         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2392              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2393                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2394                 return False;
2395         }
2396
2397         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2398                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2399                           hdr_ba->res.num_results, hdr_ba->res.reason));
2400         }
2401
2402         DEBUG(5,("check_bind_response: accepted!\n"));
2403         return True;
2404 }
2405
2406 /*******************************************************************
2407  Creates a DCE/RPC bind authentication response.
2408  This is the packet that is sent back to the server once we
2409  have received a BIND-ACK, to finish the third leg of
2410  the authentication handshake.
2411  ********************************************************************/
2412
2413 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2414                                 uint32 rpc_call_id,
2415                                 enum pipe_auth_type auth_type,
2416                                 enum pipe_auth_level auth_level,
2417                                 DATA_BLOB *pauth_blob,
2418                                 prs_struct *rpc_out)
2419 {
2420         RPC_HDR hdr;
2421         RPC_HDR_AUTH hdr_auth;
2422         uint32 pad = 0;
2423
2424         /* Create the request RPC_HDR */
2425         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2426                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2427                      pauth_blob->length );
2428
2429         /* Marshall it. */
2430         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2431                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2432                 return NT_STATUS_NO_MEMORY;
2433         }
2434
2435         /*
2436                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2437                 about padding - shouldn't this pad to length 8 ? JRA.
2438         */
2439
2440         /* 4 bytes padding. */
2441         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2442                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2443                 return NT_STATUS_NO_MEMORY;
2444         }
2445
2446         /* Create the request RPC_HDR_AUTH */
2447         init_rpc_hdr_auth(&hdr_auth,
2448                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2449                         auth_level, 0, 1);
2450
2451         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2452                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2453                 return NT_STATUS_NO_MEMORY;
2454         }
2455
2456         /*
2457          * Append the auth data to the outgoing buffer.
2458          */
2459
2460         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2461                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2462                 return NT_STATUS_NO_MEMORY;
2463         }
2464
2465         return NT_STATUS_OK;
2466 }
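
/*
 * For reference, the AUTH3 PDU assembled above is laid out as
 * (illustrative):
 *
 *   [RPC_HDR (RPC_AUTH3)][4 byte pad][RPC_HDR_AUTH][client auth blob]
 */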
2467
2468 /*******************************************************************
2469  Creates a DCE/RPC bind alter context authentication request which
2470  may contain a spnego auth blob.
2471  ********************************************************************/
2472
2473 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2474                                         const RPC_IFACE *abstract,
2475                                         const RPC_IFACE *transfer,
2476                                         enum pipe_auth_level auth_level,
2477                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2478                                         prs_struct *rpc_out)
2479 {
2480         RPC_HDR_AUTH hdr_auth;
2481         prs_struct auth_info;
2482         NTSTATUS ret = NT_STATUS_OK;
2483
2484         ZERO_STRUCT(hdr_auth);
2485         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2486                 return NT_STATUS_NO_MEMORY;
2487
2488         /* We may change the pad length before marshalling. */
2489         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2490
2491         if (pauth_blob->length) {
2492                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2493                         prs_mem_free(&auth_info);
2494                         return NT_STATUS_NO_MEMORY;
2495                 }
2496         }
2497
2498         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2499                                                 rpc_out, 
2500                                                 rpc_call_id,
2501                                                 abstract,
2502                                                 transfer,
2503                                                 &hdr_auth,
2504                                                 &auth_info);
2505         prs_mem_free(&auth_info);
2506         return ret;
2507 }
2508
2509 /****************************************************************************
2510  Do an rpc bind.
2511 ****************************************************************************/
2512
2513 struct rpc_pipe_bind_state {
2514         struct event_context *ev;
2515         struct rpc_pipe_client *cli;
2516         prs_struct rpc_out;
2517         uint32_t rpc_call_id;
2518 };
2519
2520 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2521 {
2522         prs_mem_free(&state->rpc_out);
2523         return 0;
2524 }
2525
2526 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2527 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2528                                            struct rpc_pipe_bind_state *state,
2529                                            struct rpc_hdr_info *phdr,
2530                                            prs_struct *reply_pdu);
2531 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2532 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2533                                                     struct rpc_pipe_bind_state *state,
2534                                                     struct rpc_hdr_info *phdr,
2535                                                     prs_struct *reply_pdu);
2536 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2537
2538 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2539                                      struct event_context *ev,
2540                                      struct rpc_pipe_client *cli,
2541                                      struct cli_pipe_auth_data *auth)
2542 {
2543         struct async_req *result, *subreq;
2544         struct rpc_pipe_bind_state *state;
2545         NTSTATUS status;
2546
2547         if (!async_req_setup(mem_ctx, &result, &state,
2548                              struct rpc_pipe_bind_state)) {
2549                 return NULL;
2550         }
2551
2552         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2553                 rpccli_pipe_txt(debug_ctx(), cli),
2554                 (unsigned int)auth->auth_type,
2555                 (unsigned int)auth->auth_level ));
2556
2557         state->ev = ev;
2558         state->cli = cli;
2559         state->rpc_call_id = get_rpc_call_id();
2560
2561         prs_init_empty(&state->rpc_out, state, MARSHALL);
2562         talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2563
2564         cli->auth = talloc_move(cli, &auth);
2565
2566         /* Marshall the outgoing data. */
2567         status = create_rpc_bind_req(cli, &state->rpc_out,
2568                                      state->rpc_call_id,
2569                                      &cli->abstract_syntax,
2570                                      &cli->transfer_syntax,
2571                                      cli->auth->auth_type,
2572                                      cli->auth->auth_level);
2573
2574         if (!NT_STATUS_IS_OK(status)) {
2575                 goto post_status;
2576         }
2577
2578         subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2579                                    RPC_BINDACK);
2580         if (subreq == NULL) {
2581                 status = NT_STATUS_NO_MEMORY;
2582                 goto post_status;
2583         }
2584         subreq->async.fn = rpc_pipe_bind_step_one_done;
2585         subreq->async.priv = result;
2586         return result;
2587
2588  post_status:
2589         if (async_post_ntstatus(result, ev, status)) {
2590                 return result;
2591         }
2592         TALLOC_FREE(result);
2593         return NULL;
2594 }
2595
2596 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2597 {
2598         struct async_req *req = talloc_get_type_abort(
2599                 subreq->async.priv, struct async_req);
2600         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2601                 req->private_data, struct rpc_pipe_bind_state);
2602         prs_struct reply_pdu;
2603         struct rpc_hdr_info hdr;
2604         struct rpc_hdr_ba_info hdr_ba;
2605         NTSTATUS status;
2606
2607         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2608         TALLOC_FREE(subreq);
2609         if (!NT_STATUS_IS_OK(status)) {
2610                 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2611                           rpccli_pipe_txt(debug_ctx(), state->cli),
2612                           nt_errstr(status)));
2613                 async_req_nterror(req, status);
2614                 return;
2615         }
2616
2617         /* Unmarshall the RPC header */
2618         if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2619                 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2620                 prs_mem_free(&reply_pdu);
2621                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2622                 return;
2623         }
2624
2625         if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2626                 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2627                           "RPC_HDR_BA.\n"));
2628                 prs_mem_free(&reply_pdu);
2629                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2630                 return;
2631         }
2632
2633         if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2634                 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2635                 prs_mem_free(&reply_pdu);
2636                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2637                 return;
2638         }
2639
2640         state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2641         state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2642
2643         /*
2644          * For authenticated binds we may need to do 3 or 4 leg binds.
2645          */
2646
2647         switch(state->cli->auth->auth_type) {
2648
2649         case PIPE_AUTH_TYPE_NONE:
2650         case PIPE_AUTH_TYPE_SCHANNEL:
2651                 /* Bind complete. */
2652                 prs_mem_free(&reply_pdu);
2653                 async_req_done(req);
2654                 break;
2655
2656         case PIPE_AUTH_TYPE_NTLMSSP:
2657                 /* Need to send AUTH3 packet - no reply. */
2658                 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2659                                                     &reply_pdu);
2660                 prs_mem_free(&reply_pdu);
2661                 if (!NT_STATUS_IS_OK(status)) {
2662                         async_req_nterror(req, status);
2663                 }
2664                 break;
2665
2666         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2667                 /* Need to send alter context request and reply. */
2668                 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2669                                                              &reply_pdu);
2670                 prs_mem_free(&reply_pdu);
2671                 if (!NT_STATUS_IS_OK(status)) {
2672                         async_req_nterror(req, status);
2673                 }
2674                 break;
2675
2676         case PIPE_AUTH_TYPE_KRB5:
2677                 /* */
2678                 /* Raw KRB5 auth is not yet handled in this async path - fall through to the error case below. */
2679         default:
2680                 DEBUG(0,("rpc_pipe_bind_step_one_done: unknown auth type %u\n",
2681                          (unsigned int)state->cli->auth->auth_type));
2682                 prs_mem_free(&reply_pdu);
2683                 async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2684         }
2685 }
2686
2687 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2688                                            struct rpc_pipe_bind_state *state,
2689                                            struct rpc_hdr_info *phdr,
2690                                            prs_struct *reply_pdu)
2691 {
2692         DATA_BLOB server_response = data_blob_null;
2693         DATA_BLOB client_reply = data_blob_null;
2694         struct rpc_hdr_auth_info hdr_auth;
2695         struct async_req *subreq;
2696         NTSTATUS status;
2697
2698         if ((phdr->auth_len == 0)
2699             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2700                 return NT_STATUS_INVALID_PARAMETER;
2701         }
2702
2703         if (!prs_set_offset(
2704                     reply_pdu,
2705                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2706                 return NT_STATUS_INVALID_PARAMETER;
2707         }
2708
2709         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2710                 return NT_STATUS_INVALID_PARAMETER;
2711         }
2712
2713         /* TODO - check auth_type/auth_level match. */
2714
2715         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2716         prs_copy_data_out((char *)server_response.data, reply_pdu,
2717                           phdr->auth_len);
2718
2719         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2720                                 server_response, &client_reply);
2721
2722         if (!NT_STATUS_IS_OK(status)) {
2723                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2724                           "blob failed: %s.\n", nt_errstr(status)));
2725                 return status;
2726         }
2727
2728         prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2729
2730         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2731                                        state->cli->auth->auth_type,
2732                                        state->cli->auth->auth_level,
2733                                        &client_reply, &state->rpc_out);
2734         data_blob_free(&client_reply);
2735
2736         if (!NT_STATUS_IS_OK(status)) {
2737                 return status;
2738         }
2739
2740         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2741                                 (uint8_t *)prs_data_p(&state->rpc_out),
2742                                 prs_offset(&state->rpc_out));
2743         if (subreq == NULL) {
2744                 return NT_STATUS_NO_MEMORY;
2745         }
2746         subreq->async.fn = rpc_bind_auth3_write_done;
2747         subreq->async.priv = req;
2748         return NT_STATUS_OK;
2749 }
2750
2751 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2752 {
2753         struct async_req *req = talloc_get_type_abort(
2754                 subreq->async.priv, struct async_req);
2755         NTSTATUS status;
2756
2757         status = rpc_write_recv(subreq);
2758         TALLOC_FREE(subreq);
2759         if (!NT_STATUS_IS_OK(status)) {
2760                 async_req_nterror(req, status);
2761                 return;
2762         }
2763         async_req_done(req);
2764 }
2765
2766 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2767                                                     struct rpc_pipe_bind_state *state,
2768                                                     struct rpc_hdr_info *phdr,
2769                                                     prs_struct *reply_pdu)
2770 {
2771         DATA_BLOB server_spnego_response = data_blob_null;
2772         DATA_BLOB server_ntlm_response = data_blob_null;
2773         DATA_BLOB client_reply = data_blob_null;
2774         DATA_BLOB tmp_blob = data_blob_null;
2775         RPC_HDR_AUTH hdr_auth;
2776         struct async_req *subreq;
2777         NTSTATUS status;
2778
2779         if ((phdr->auth_len == 0)
2780             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2781                 return NT_STATUS_INVALID_PARAMETER;
2782         }
2783
2784         /* Process the returned NTLMSSP blob first. */
2785         if (!prs_set_offset(
2786                     reply_pdu,
2787                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2788                 return NT_STATUS_INVALID_PARAMETER;
2789         }
2790
2791         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2792                 return NT_STATUS_INVALID_PARAMETER;
2793         }
2794
2795         server_spnego_response = data_blob(NULL, phdr->auth_len);
2796         prs_copy_data_out((char *)server_spnego_response.data,
2797                           reply_pdu, phdr->auth_len);
2798
2799         /*
2800          * The server might give us back two challenges - tmp_blob is for the
2801          * second.
2802          */
2803         if (!spnego_parse_challenge(server_spnego_response,
2804                                     &server_ntlm_response, &tmp_blob)) {
2805                 data_blob_free(&server_spnego_response);
2806                 data_blob_free(&server_ntlm_response);
2807                 data_blob_free(&tmp_blob);
2808                 return NT_STATUS_INVALID_PARAMETER;
2809         }
2810
2811         /* We're finished with the server spnego response and the tmp_blob. */
2812         data_blob_free(&server_spnego_response);
2813         data_blob_free(&tmp_blob);
2814
2815         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2816                                 server_ntlm_response, &client_reply);
2817
2818         /* Finished with the server_ntlm response */
2819         data_blob_free(&server_ntlm_response);
2820
2821         if (!NT_STATUS_IS_OK(status)) {
2822                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2823                           "using server blob failed.\n"));
2824                 data_blob_free(&client_reply);
2825                 return status;
2826         }
2827
2828         /* SPNEGO wrap the client reply. */
2829         tmp_blob = spnego_gen_auth(client_reply);
2830         data_blob_free(&client_reply);
2831         client_reply = tmp_blob;
2832         tmp_blob = data_blob_null;
2833
2834         /* Now prepare the alter context pdu. */
2835         prs_init_empty(&state->rpc_out, state, MARSHALL);
2836
2837         status = create_rpc_alter_context(state->rpc_call_id,
2838                                           &state->cli->abstract_syntax,
2839                                           &state->cli->transfer_syntax,
2840                                           state->cli->auth->auth_level,
2841                                           &client_reply,
2842                                           &state->rpc_out);
2843         data_blob_free(&client_reply);
2844
2845         if (!NT_STATUS_IS_OK(status)) {
2846                 return status;
2847         }
2848
2849         subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2850                                    &state->rpc_out, RPC_ALTCONTRESP);
2851         if (subreq == NULL) {
2852                 return NT_STATUS_NO_MEMORY;
2853         }
2854         subreq->async.fn = rpc_bind_ntlmssp_api_done;
2855         subreq->async.priv = req;
2856         return NT_STATUS_OK;
2857 }
2858
2859 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2860 {
2861         struct async_req *req = talloc_get_type_abort(
2862                 subreq->async.priv, struct async_req);
2863         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2864                 req->private_data, struct rpc_pipe_bind_state);
2865         DATA_BLOB server_spnego_response = data_blob_null;
2866         DATA_BLOB tmp_blob = data_blob_null;
2867         prs_struct reply_pdu;
2868         struct rpc_hdr_info hdr;
2869         struct rpc_hdr_auth_info hdr_auth;
2870         NTSTATUS status;
2871
2872         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2873         TALLOC_FREE(subreq);
2874         if (!NT_STATUS_IS_OK(status)) {
2875                 async_req_nterror(req, status);
2876                 return;
2877         }
2878
2879         /* Get the auth blob from the reply. */
2880         if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
2881                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2882                           "unmarshall RPC_HDR.\n"));
2883                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2884                 return;
2885         }
2886
2887         if (!prs_set_offset(
2888                     &reply_pdu,
2889                     hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2890                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2891                 return;
2892         }
2893
2894         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2895                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2896                 return;
2897         }
2898
2899         server_spnego_response = data_blob(NULL, hdr.auth_len);
2900         prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2901                           hdr.auth_len);
2902
2903         /* Check we got a valid auth response. */
2904         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2905                                         OID_NTLMSSP, &tmp_blob)) {
2906                 data_blob_free(&server_spnego_response);
2907                 data_blob_free(&tmp_blob);
2908                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2909                 return;
2910         }
2911
2912         data_blob_free(&server_spnego_response);
2913         data_blob_free(&tmp_blob);
2914
2915         DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2916                  "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2917         async_req_done(req);
2918 }
2919
2920 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2921 {
2922         return async_req_simple_recv_ntstatus(req);
2923 }
2924
2925 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2926                        struct cli_pipe_auth_data *auth)
2927 {
2928         TALLOC_CTX *frame = talloc_stackframe();
2929         struct event_context *ev;
2930         struct async_req *req;
2931         NTSTATUS status = NT_STATUS_NO_MEMORY;
2932
2933         ev = event_context_init(frame);
2934         if (ev == NULL) {
2935                 goto fail;
2936         }
2937
2938         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2939         if (req == NULL) {
2940                 goto fail;
2941         }
2942
2943         while (req->state < ASYNC_REQ_DONE) {
2944                 event_loop_once(ev);
2945         }
2946
2947         status = rpc_pipe_bind_recv(req);
2948  fail:
2949         TALLOC_FREE(frame);
2950         return status;
2951 }
2952
2953 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2954                                 unsigned int timeout)
2955 {
2956         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2957
2958         if (cli == NULL) {
2959                 return 0;
2960         }
2961         return cli_set_timeout(cli, timeout);
2962 }
2963
2964 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2965 {
2966         struct cli_state *cli;
2967
2968         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2969             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2970                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2971                 return true;
2972         }
2973
2974         cli = rpc_pipe_np_smb_conn(rpc_cli);
2975         if (cli == NULL) {
2976                 return false;
2977         }
2978         E_md4hash(cli->password ? cli->password : "", nt_hash);
2979         return true;
2980 }
2981
2982 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2983                                struct cli_pipe_auth_data **presult)
2984 {
2985         struct cli_pipe_auth_data *result;
2986
2987         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2988         if (result == NULL) {
2989                 return NT_STATUS_NO_MEMORY;
2990         }
2991
2992         result->auth_type = PIPE_AUTH_TYPE_NONE;
2993         result->auth_level = PIPE_AUTH_LEVEL_NONE;
2994
2995         result->user_name = talloc_strdup(result, "");
2996         result->domain = talloc_strdup(result, "");
2997         if ((result->user_name == NULL) || (result->domain == NULL)) {
2998                 TALLOC_FREE(result);
2999                 return NT_STATUS_NO_MEMORY;
3000         }
3001
3002         *presult = result;
3003         return NT_STATUS_OK;
3004 }
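/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * that has already opened a pipe with one of the rpc_pipe_open_*() helpers
 * below could combine rpccli_anon_bind_data() with rpc_pipe_bind(). The
 * function and variable names are examples only; error handling is
 * abbreviated.
 */
#if 0
static NTSTATUS example_bind_anonymously(struct rpc_pipe_client *p)
{
	struct cli_pipe_auth_data *auth;
	NTSTATUS status;

	/* Build "no auth" credentials, owned by the pipe. */
	status = rpccli_anon_bind_data(p, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Synchronous bind over the pipe's existing transport. */
	return rpc_pipe_bind(p, auth);
}
#endif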
3005
3006 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3007 {
3008         ntlmssp_end(&auth->a_u.ntlmssp_state);
3009         return 0;
3010 }
3011
3012 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3013                                   enum pipe_auth_type auth_type,
3014                                   enum pipe_auth_level auth_level,
3015                                   const char *domain,
3016                                   const char *username,
3017                                   const char *password,
3018                                   struct cli_pipe_auth_data **presult)
3019 {
3020         struct cli_pipe_auth_data *result;
3021         NTSTATUS status;
3022
3023         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3024         if (result == NULL) {
3025                 return NT_STATUS_NO_MEMORY;
3026         }
3027
3028         result->auth_type = auth_type;
3029         result->auth_level = auth_level;
3030
3031         result->user_name = talloc_strdup(result, username);
3032         result->domain = talloc_strdup(result, domain);
3033         if ((result->user_name == NULL) || (result->domain == NULL)) {
3034                 status = NT_STATUS_NO_MEMORY;
3035                 goto fail;
3036         }
3037
3038         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3039         if (!NT_STATUS_IS_OK(status)) {
3040                 goto fail;
3041         }
3042
3043         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3044
3045         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3046         if (!NT_STATUS_IS_OK(status)) {
3047                 goto fail;
3048         }
3049
3050         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3051         if (!NT_STATUS_IS_OK(status)) {
3052                 goto fail;
3053         }
3054
3055         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3056         if (!NT_STATUS_IS_OK(status)) {
3057                 goto fail;
3058         }
3059
3060         /*
3061          * Turn off sign+seal to allow selected auth level to turn it back on.
3062          */
3063         result->a_u.ntlmssp_state->neg_flags &=
3064                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3065
3066         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3067                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3068         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3069                 result->a_u.ntlmssp_state->neg_flags
3070                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3071         }
3072
3073         *presult = result;
3074         return NT_STATUS_OK;
3075
3076  fail:
3077         TALLOC_FREE(result);
3078         return status;
3079 }
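/*
 * Illustrative sketch (not part of the original file): building NTLMSSP bind
 * credentials for a sign+seal connection. The domain, user and password
 * literals are placeholders.
 */
#if 0
static NTSTATUS example_ntlmssp_auth(TALLOC_CTX *mem_ctx,
				     struct cli_pipe_auth_data **pauth)
{
	/* SPNEGO-wrapped NTLMSSP; PRIVACY selects sign+seal above. */
	return rpccli_ntlmssp_bind_data(mem_ctx,
					PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
					PIPE_AUTH_LEVEL_PRIVACY,
					"EXAMPLEDOM", "exampleuser",
					"examplepass", pauth);
}
#endif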
3080
3081 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3082                                    enum pipe_auth_level auth_level,
3083                                    const uint8_t sess_key[16],
3084                                    struct cli_pipe_auth_data **presult)
3085 {
3086         struct cli_pipe_auth_data *result;
3087
3088         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3089         if (result == NULL) {
3090                 return NT_STATUS_NO_MEMORY;
3091         }
3092
3093         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3094         result->auth_level = auth_level;
3095
3096         result->user_name = talloc_strdup(result, "");
3097         result->domain = talloc_strdup(result, domain);
3098         if ((result->user_name == NULL) || (result->domain == NULL)) {
3099                 goto fail;
3100         }
3101
3102         result->a_u.schannel_auth = talloc(result,
3103                                            struct schannel_auth_struct);
3104         if (result->a_u.schannel_auth == NULL) {
3105                 goto fail;
3106         }
3107
3108         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3109                sizeof(result->a_u.schannel_auth->sess_key));
3110         result->a_u.schannel_auth->seq_num = 0;
3111
3112         *presult = result;
3113         return NT_STATUS_OK;
3114
3115  fail:
3116         TALLOC_FREE(result);
3117         return NT_STATUS_NO_MEMORY;
3118 }
3119
3120 #ifdef HAVE_KRB5
3121 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3122 {
3123         data_blob_free(&auth->session_key);
3124         return 0;
3125 }
3126 #endif
3127
3128 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3129                                    enum pipe_auth_level auth_level,
3130                                    const char *service_princ,
3131                                    const char *username,
3132                                    const char *password,
3133                                    struct cli_pipe_auth_data **presult)
3134 {
3135 #ifdef HAVE_KRB5
3136         struct cli_pipe_auth_data *result;
3137
3138         if ((username != NULL) && (password != NULL)) {
3139                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3140                 if (ret != 0) {
3141                         return NT_STATUS_ACCESS_DENIED;
3142                 }
3143         }
3144
3145         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3146         if (result == NULL) {
3147                 return NT_STATUS_NO_MEMORY;
3148         }
3149
3150         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3151         result->auth_level = auth_level;
3152
3153         /*
3154          * Username / domain need fixing!
3155          */
3156         result->user_name = talloc_strdup(result, "");
3157         result->domain = talloc_strdup(result, "");
3158         if ((result->user_name == NULL) || (result->domain == NULL)) {
3159                 goto fail;
3160         }
3161
3162         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3163                 result, struct kerberos_auth_struct);
3164         if (result->a_u.kerberos_auth == NULL) {
3165                 goto fail;
3166         }
3167         talloc_set_destructor(result->a_u.kerberos_auth,
3168                               cli_auth_kerberos_data_destructor);
3169
3170         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3171                 result, service_princ);
3172         if (result->a_u.kerberos_auth->service_principal == NULL) {
3173                 goto fail;
3174         }
3175
3176         *presult = result;
3177         return NT_STATUS_OK;
3178
3179  fail:
3180         TALLOC_FREE(result);
3181         return NT_STATUS_NO_MEMORY;
3182 #else
3183         return NT_STATUS_NOT_SUPPORTED;
3184 #endif
3185 }
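/*
 * Illustrative sketch (not part of the original file): requesting Kerberos
 * bind credentials while relying on an existing TGT (no kinit), with an
 * example service principal. On builds without HAVE_KRB5 this returns
 * NT_STATUS_NOT_SUPPORTED.
 */
#if 0
static NTSTATUS example_krb5_auth(TALLOC_CTX *mem_ctx,
				  struct cli_pipe_auth_data **pauth)
{
	/* NULL username/password means "use the existing credential cache". */
	return rpccli_kerberos_bind_data(mem_ctx,
					 PIPE_AUTH_LEVEL_PRIVACY,
					 "cifs/server.example.com",
					 NULL, NULL, pauth);
}
#endif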
3186
3187 /**
3188  * Create an rpc pipe client struct, connecting to a tcp port.
3189  */
3190 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3191                                        uint16_t port,
3192                                        const struct ndr_syntax_id *abstract_syntax,
3193                                        struct rpc_pipe_client **presult)
3194 {
3195         struct rpc_pipe_client *result;
3196         struct sockaddr_storage addr;
3197         NTSTATUS status;
3198         int fd;
3199
3200         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3201         if (result == NULL) {
3202                 return NT_STATUS_NO_MEMORY;
3203         }
3204
3205         result->abstract_syntax = *abstract_syntax;
3206         result->transfer_syntax = ndr_transfer_syntax;
3207         result->dispatch = cli_do_rpc_ndr;
3208
3209         result->desthost = talloc_strdup(result, host);
3210         result->srv_name_slash = talloc_asprintf_strupper_m(
3211                 result, "\\\\%s", result->desthost);
3212         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3213                 status = NT_STATUS_NO_MEMORY;
3214                 goto fail;
3215         }
3216
3217         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3218         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3219
3220         if (!resolve_name(host, &addr, 0)) {
3221                 status = NT_STATUS_NOT_FOUND;
3222                 goto fail;
3223         }
3224
3225         status = open_socket_out(&addr, port, 60, &fd);
3226         if (!NT_STATUS_IS_OK(status)) {
3227                 goto fail;
3228         }
3229         set_socket_options(fd, lp_socket_options());
3230
3231         status = rpc_transport_sock_init(result, fd, &result->transport);
3232         if (!NT_STATUS_IS_OK(status)) {
3233                 close(fd);
3234                 goto fail;
3235         }
3236
3237         result->transport->transport = NCACN_IP_TCP;
3238
3239         *presult = result;
3240         return NT_STATUS_OK;
3241
3242  fail:
3243         TALLOC_FREE(result);
3244         return status;
3245 }
3246
3247 /**
3248  * Determine the tcp port on which a dcerpc interface is listening
3249  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3250  * target host.
3251  */
3252 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3253                                       const struct ndr_syntax_id *abstract_syntax,
3254                                       uint16_t *pport)
3255 {
3256         NTSTATUS status;
3257         struct rpc_pipe_client *epm_pipe = NULL;
3258         struct cli_pipe_auth_data *auth = NULL;
3259         struct dcerpc_binding *map_binding = NULL;
3260         struct dcerpc_binding *res_binding = NULL;
3261         struct epm_twr_t *map_tower = NULL;
3262         struct epm_twr_t *res_towers = NULL;
3263         struct policy_handle *entry_handle = NULL;
3264         uint32_t num_towers = 0;
3265         uint32_t max_towers = 1;
3266         struct epm_twr_p_t towers;
3267         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3268
3269         if (pport == NULL) {
3270                 status = NT_STATUS_INVALID_PARAMETER;
3271                 goto done;
3272         }
3273
3274         /* open the connection to the endpoint mapper */
3275         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3276                                         &ndr_table_epmapper.syntax_id,
3277                                         &epm_pipe);
3278
3279         if (!NT_STATUS_IS_OK(status)) {
3280                 goto done;
3281         }
3282
3283         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3284         if (!NT_STATUS_IS_OK(status)) {
3285                 goto done;
3286         }
3287
3288         status = rpc_pipe_bind(epm_pipe, auth);
3289         if (!NT_STATUS_IS_OK(status)) {
3290                 goto done;
3291         }
3292
3293         /* create tower for asking the epmapper */
3294
3295         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3296         if (map_binding == NULL) {
3297                 status = NT_STATUS_NO_MEMORY;
3298                 goto done;
3299         }
3300
3301         map_binding->transport = NCACN_IP_TCP;
3302         map_binding->object = *abstract_syntax;
3303         map_binding->host = host; /* needed? */
3304         map_binding->endpoint = "0"; /* correct? needed? */
3305
3306         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3307         if (map_tower == NULL) {
3308                 status = NT_STATUS_NO_MEMORY;
3309                 goto done;
3310         }
3311
3312         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3313                                             &(map_tower->tower));
3314         if (!NT_STATUS_IS_OK(status)) {
3315                 goto done;
3316         }
3317
3318         /* allocate further parameters for the epm_Map call */
3319
3320         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3321         if (res_towers == NULL) {
3322                 status = NT_STATUS_NO_MEMORY;
3323                 goto done;
3324         }
3325         towers.twr = res_towers;
3326
3327         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3328         if (entry_handle == NULL) {
3329                 status = NT_STATUS_NO_MEMORY;
3330                 goto done;
3331         }
3332
3333         /* ask the endpoint mapper for the port */
3334
3335         status = rpccli_epm_Map(epm_pipe,
3336                                 tmp_ctx,
3337                                 CONST_DISCARD(struct GUID *,
3338                                               &(abstract_syntax->uuid)),
3339                                 map_tower,
3340                                 entry_handle,
3341                                 max_towers,
3342                                 &num_towers,
3343                                 &towers);
3344
3345         if (!NT_STATUS_IS_OK(status)) {
3346                 goto done;
3347         }
3348
3349         if (num_towers != 1) {
3350                 status = NT_STATUS_UNSUCCESSFUL;
3351                 goto done;
3352         }
3353
3354         /* extract the port from the answer */
3355
3356         status = dcerpc_binding_from_tower(tmp_ctx,
3357                                            &(towers.twr->tower),
3358                                            &res_binding);
3359         if (!NT_STATUS_IS_OK(status)) {
3360                 goto done;
3361         }
3362
3363         /* are further checks here necessary? */
3364         if (res_binding->transport != NCACN_IP_TCP) {
3365                 status = NT_STATUS_UNSUCCESSFUL;
3366                 goto done;
3367         }
3368
3369         *pport = (uint16_t)atoi(res_binding->endpoint);
3370
3371 done:
3372         TALLOC_FREE(tmp_ctx);
3373         return status;
3374 }
3375
3376 /**
3377  * Create a rpc pipe client struct, connecting to a host via tcp.
3378  * The port is determined by asking the endpoint mapper on the given
3379  * host.
3380  */
3381 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3382                            const struct ndr_syntax_id *abstract_syntax,
3383                            struct rpc_pipe_client **presult)
3384 {
3385         NTSTATUS status;
3386         uint16_t port = 0;
3387
3388         *presult = NULL;
3389
3390         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3391         if (!NT_STATUS_IS_OK(status)) {
3392                 goto done;
3393         }
3394
3395         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3396                                         abstract_syntax, presult);
3397
3398 done:
3399         return status;
3400 }
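/*
 * Illustrative sketch (not part of the original file): opening an
 * ncacn_ip_tcp connection to the srvsvc interface on a host, letting the
 * endpoint mapper pick the port, then binding anonymously.
 * "server.example.com" is a placeholder.
 */
#if 0
static NTSTATUS example_open_srvsvc_tcp(TALLOC_CTX *mem_ctx,
					struct rpc_pipe_client **ppipe)
{
	struct rpc_pipe_client *p;
	struct cli_pipe_auth_data *auth;
	NTSTATUS status;

	status = rpc_pipe_open_tcp(mem_ctx, "server.example.com",
				   &ndr_table_srvsvc.syntax_id, &p);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	status = rpccli_anon_bind_data(p, &auth);
	if (NT_STATUS_IS_OK(status)) {
		status = rpc_pipe_bind(p, auth);
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(p);
		return status;
	}

	*ppipe = p;
	return NT_STATUS_OK;
}
#endif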
3401
3402 /********************************************************************
3403  Create a rpc pipe client struct, connecting to a unix domain socket
3404  ********************************************************************/
3405 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3406                                const struct ndr_syntax_id *abstract_syntax,
3407                                struct rpc_pipe_client **presult)
3408 {
3409         struct rpc_pipe_client *result;
3410         struct sockaddr_un addr;
3411         NTSTATUS status;
3412         int fd;
3413
3414         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3415         if (result == NULL) {
3416                 return NT_STATUS_NO_MEMORY;
3417         }
3418
3419         result->abstract_syntax = *abstract_syntax;
3420         result->transfer_syntax = ndr_transfer_syntax;
3421         result->dispatch = cli_do_rpc_ndr;
3422
3423         result->desthost = get_myname(result);
3424         result->srv_name_slash = talloc_asprintf_strupper_m(
3425                 result, "\\\\%s", result->desthost);
3426         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3427                 status = NT_STATUS_NO_MEMORY;
3428                 goto fail;
3429         }
3430
3431         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3432         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3433
3434         fd = socket(AF_UNIX, SOCK_STREAM, 0);
3435         if (fd == -1) {
3436                 status = map_nt_error_from_unix(errno);
3437                 goto fail;
3438         }
3439
3440         ZERO_STRUCT(addr);
3441         addr.sun_family = AF_UNIX;
3442         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3443
3444         if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3445                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3446                           strerror(errno)));
3447                 status = map_nt_error_from_unix(errno);
3448                 close(fd);
3448                 goto fail;
3449         }
3450
3451         status = rpc_transport_sock_init(result, fd, &result->transport);
3452         if (!NT_STATUS_IS_OK(status)) {
3453                 close(fd);
3454                 goto fail;
3455         }
3456
3457         result->transport->transport = NCALRPC;
3458
3459         *presult = result;
3460         return NT_STATUS_OK;
3461
3462  fail:
3463         TALLOC_FREE(result);
3464         return status;
3465 }
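/*
 * Illustrative sketch (not part of the original file): connecting to a local
 * ncalrpc endpoint. The socket path is a placeholder; callers would normally
 * derive it from their configuration.
 */
#if 0
static NTSTATUS example_open_ncalrpc(TALLOC_CTX *mem_ctx,
				     struct rpc_pipe_client **ppipe)
{
	return rpc_pipe_open_ncalrpc(mem_ctx, "/var/run/example.sock",
				     &ndr_table_winreg.syntax_id, ppipe);
}
#endif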
3466
3467 static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
3468 {
3469         struct cli_state *cli;
3470
3471         cli = rpc_pipe_np_smb_conn(p);
3472         if (cli != NULL) {
3473                 DLIST_REMOVE(cli->pipe_list, p);
3474         }
3475         return 0;
3476 }
3477
3478 /****************************************************************************
3479  Open a named pipe over SMB to a remote server.
3480  *
3481  * CAVEAT CALLER OF THIS FUNCTION:
3482  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3483  *    so be sure that this function is called AFTER any structure (vs pointer)
3484  *    assignment of the cli.  In particular, libsmbclient does structure
3485  *    assignments of cli, which invalidates the data in the returned
3486  *    rpc_pipe_client if this function is called before the structure assignment
3487  *    of cli.
3488  * 
3489  ****************************************************************************/
3490
3491 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3492                                  const struct ndr_syntax_id *abstract_syntax,
3493                                  struct rpc_pipe_client **presult)
3494 {
3495         struct rpc_pipe_client *result;
3496         NTSTATUS status;
3497
3498         /* sanity check to protect against crashes */
3499
3500         if ( !cli ) {
3501                 return NT_STATUS_INVALID_HANDLE;
3502         }
3503
3504         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3505         if (result == NULL) {
3506                 return NT_STATUS_NO_MEMORY;
3507         }
3508
3509         result->abstract_syntax = *abstract_syntax;
3510         result->transfer_syntax = ndr_transfer_syntax;
3511         result->dispatch = cli_do_rpc_ndr;
3512         result->desthost = talloc_strdup(result, cli->desthost);
3513         result->srv_name_slash = talloc_asprintf_strupper_m(
3514                 result, "\\\\%s", result->desthost);
3515
3516         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3517         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3518
3519         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3520                 TALLOC_FREE(result);
3521                 return NT_STATUS_NO_MEMORY;
3522         }
3523
3524         status = rpc_transport_np_init(result, cli, abstract_syntax,
3525                                        &result->transport);
3526         if (!NT_STATUS_IS_OK(status)) {
3527                 TALLOC_FREE(result);
3528                 return status;
3529         }
3530
3531         result->transport->transport = NCACN_NP;
3532
3533         DLIST_ADD(cli->pipe_list, result);
3534         talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3535
3536         *presult = result;
3537         return NT_STATUS_OK;
3538 }
3539
3540 NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
3541                              struct rpc_cli_smbd_conn *conn,
3542                              const struct ndr_syntax_id *syntax,
3543                              struct rpc_pipe_client **presult)
3544 {
3545         struct rpc_pipe_client *result;
3546         struct cli_pipe_auth_data *auth;
3547         NTSTATUS status;
3548
3549         result = talloc(mem_ctx, struct rpc_pipe_client);
3550         if (result == NULL) {
3551                 return NT_STATUS_NO_MEMORY;
3552         }
3553         result->abstract_syntax = *syntax;
3554         result->transfer_syntax = ndr_transfer_syntax;
3555         result->dispatch = cli_do_rpc_ndr;
3556         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3557         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3558
3559         result->desthost = talloc_strdup(result, global_myname());
3560         result->srv_name_slash = talloc_asprintf_strupper_m(
3561                 result, "\\\\%s", global_myname());
3562         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3563                 TALLOC_FREE(result);
3564                 return NT_STATUS_NO_MEMORY;
3565         }
3566
3567         status = rpc_transport_smbd_init(result, conn, syntax,
3568                                          &result->transport);
3569         if (!NT_STATUS_IS_OK(status)) {
3570                 DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
3571                           nt_errstr(status)));
3572                 TALLOC_FREE(result);
3573                 return status;
3574         }
3575
3576         status = rpccli_anon_bind_data(result, &auth);
3577         if (!NT_STATUS_IS_OK(status)) {
3578                 DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
3579                           nt_errstr(status)));
3580                 TALLOC_FREE(result);
3581                 return status;
3582         }
3583
3584         status = rpc_pipe_bind(result, auth);
3585         if (!NT_STATUS_IS_OK(status)) {
3586                 DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
3587                 TALLOC_FREE(result);
3588                 return status;
3589         }
3590
3591         result->transport->transport = NCACN_INTERNAL;
3592
3593         *presult = result;
3594         return NT_STATUS_OK;
3595 }
3596
3597 /****************************************************************************
3598  Open a pipe to a remote server.
3599  ****************************************************************************/
3600
3601 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3602                                   enum dcerpc_transport_t transport,
3603                                   const struct ndr_syntax_id *interface,
3604                                   struct rpc_pipe_client **presult)
3605 {
3606         switch (transport) {
3607         case NCACN_IP_TCP:
3608                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3609                                          presult);
3610         case NCACN_NP:
3611                 return rpc_pipe_open_np(cli, interface, presult);
3612         default:
3613                 return NT_STATUS_NOT_IMPLEMENTED;
3614         }
3615 }
3616
3617 /****************************************************************************
3618  Open a named pipe to an SMB server and bind anonymously.
3619  ****************************************************************************/
3620
3621 NTSTATUS cli_rpc_pipe_open_noauth_transport(struct cli_state *cli,
3622                                             enum dcerpc_transport_t transport,
3623                                             const struct ndr_syntax_id *interface,
3624                                             struct rpc_pipe_client **presult)
3625 {
3626         struct rpc_pipe_client *result;
3627         struct cli_pipe_auth_data *auth;
3628         NTSTATUS status;
3629
3630         status = cli_rpc_pipe_open(cli, transport, interface, &result);
3631         if (!NT_STATUS_IS_OK(status)) {
3632                 return status;
3633         }
3634
3635         status = rpccli_anon_bind_data(result, &auth);
3636         if (!NT_STATUS_IS_OK(status)) {
3637                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3638                           nt_errstr(status)));
3639                 TALLOC_FREE(result);
3640                 return status;
3641         }
3642
3643         /*
3644          * This is a bit of an abstraction violation due to the fact that an
3645          * anonymous bind on an authenticated SMB inherits the user/domain
3646          * from the enclosing SMB creds
3647          */
3648
3649         TALLOC_FREE(auth->user_name);
3650         TALLOC_FREE(auth->domain);
3651
3652         auth->user_name = talloc_strdup(auth, cli->user_name);
3653         auth->domain = talloc_strdup(auth, cli->domain);
3654         auth->user_session_key = data_blob_talloc(auth,
3655                 cli->user_session_key.data,
3656                 cli->user_session_key.length);
3657
3658         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3659                 TALLOC_FREE(result);
3660                 return NT_STATUS_NO_MEMORY;
3661         }
3662
3663         status = rpc_pipe_bind(result, auth);
3664         if (!NT_STATUS_IS_OK(status)) {
3665                 int lvl = 0;
3666                 if (ndr_syntax_id_equal(interface,
3667                                         &ndr_table_dssetup.syntax_id)) {
3668                         /* non AD domains just don't have this pipe, avoid
3669                          * level 0 statement in that case - gd */
3670                         lvl = 3;
3671                 }
3672                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3673                             "%s failed with error %s\n",
3674                             get_pipe_name_from_iface(interface),
3675                             nt_errstr(status) ));
3676                 TALLOC_FREE(result);
3677                 return status;
3678         }
3679
3680         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3681                   "%s and bound anonymously.\n",
3682                   get_pipe_name_from_iface(interface), cli->desthost));
3683
3684         *presult = result;
3685         return NT_STATUS_OK;
3686 }
3687
3688 /****************************************************************************
3689  Same as above, but always over the ncacn_np transport.
3689  ****************************************************************************/
3690
3691 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3692                                   const struct ndr_syntax_id *interface,
3693                                   struct rpc_pipe_client **presult)
3694 {
3695         return cli_rpc_pipe_open_noauth_transport(cli, NCACN_NP,
3696                                                   interface, presult);
3697 }
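/*
 * Illustrative sketch (not part of the original file): the common pattern of
 * opening a named pipe anonymously over an existing SMB connection and
 * cleaning it up afterwards. "cli" is assumed to be a connected cli_state.
 */
#if 0
static NTSTATUS example_noauth_winreg(struct cli_state *cli)
{
	struct rpc_pipe_client *pipe_hnd = NULL;
	NTSTATUS status;

	status = cli_rpc_pipe_open_noauth(cli, &ndr_table_winreg.syntax_id,
					  &pipe_hnd);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* ... issue winreg calls over pipe_hnd here ... */

	TALLOC_FREE(pipe_hnd);
	return NT_STATUS_OK;
}
#endif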
3698
3699 /****************************************************************************
3700  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3701  ****************************************************************************/
3702
3703 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3704                                                    const struct ndr_syntax_id *interface,
3705                                                    enum dcerpc_transport_t transport,
3706                                                    enum pipe_auth_type auth_type,
3707                                                    enum pipe_auth_level auth_level,
3708                                                    const char *domain,
3709                                                    const char *username,
3710                                                    const char *password,
3711                                                    struct rpc_pipe_client **presult)
3712 {
3713         struct rpc_pipe_client *result;
3714         struct cli_pipe_auth_data *auth;
3715         NTSTATUS status;
3716
3717         status = cli_rpc_pipe_open(cli, transport, interface, &result);
3718         if (!NT_STATUS_IS_OK(status)) {
3719                 return status;
3720         }
3721
3722         status = rpccli_ntlmssp_bind_data(
3723                 result, auth_type, auth_level, domain, username,
3724                 password, &auth);
3725         if (!NT_STATUS_IS_OK(status)) {
3726                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3727                           nt_errstr(status)));
3728                 goto err;
3729         }
3730
3731         status = rpc_pipe_bind(result, auth);
3732         if (!NT_STATUS_IS_OK(status)) {
3733                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3734                         nt_errstr(status) ));
3735                 goto err;
3736         }
3737
3738         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3739                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3740                   get_pipe_name_from_iface(interface), cli->desthost, domain,
3741                   username ));
3742
3743         *presult = result;
3744         return NT_STATUS_OK;
3745
3746   err:
3747
3748         TALLOC_FREE(result);
3749         return status;
3750 }
3751
3752 /****************************************************************************
3753  External interface.
3754  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3755  ****************************************************************************/
3756
3757 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3758                                    const struct ndr_syntax_id *interface,
3759                                    enum dcerpc_transport_t transport,
3760                                    enum pipe_auth_level auth_level,
3761                                    const char *domain,
3762                                    const char *username,
3763                                    const char *password,
3764                                    struct rpc_pipe_client **presult)
3765 {
3766         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3767                                                 interface,
3768                                                 transport,
3769                                                 PIPE_AUTH_TYPE_NTLMSSP,
3770                                                 auth_level,
3771                                                 domain,
3772                                                 username,
3773                                                 password,
3774                                                 presult);
3775 }
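/*
 * Illustrative sketch (not part of the original file): opening the samr pipe
 * with a raw NTLMSSP bind at privacy level over ncacn_np. The credential
 * literals are placeholders.
 */
#if 0
static NTSTATUS example_ntlmssp_samr(struct cli_state *cli,
				     struct rpc_pipe_client **ppipe)
{
	return cli_rpc_pipe_open_ntlmssp(cli, &ndr_table_samr.syntax_id,
					 NCACN_NP, PIPE_AUTH_LEVEL_PRIVACY,
					 "EXAMPLEDOM", "exampleuser",
					 "examplepass", ppipe);
}
#endif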
3776
3777 /****************************************************************************
3778  External interface.
3779  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3780  ****************************************************************************/
3781
3782 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3783                                           const struct ndr_syntax_id *interface,
3784                                           enum dcerpc_transport_t transport,
3785                                           enum pipe_auth_level auth_level,
3786                                           const char *domain,
3787                                           const char *username,
3788                                           const char *password,
3789                                           struct rpc_pipe_client **presult)
3790 {
3791         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3792                                                 interface,
3793                                                 transport,
3794                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3795                                                 auth_level,
3796                                                 domain,
3797                                                 username,
3798                                                 password,
3799                                                 presult);
3800 }
3801
3802 /****************************************************************************
3803   Get the schannel session key out of an already opened netlogon pipe.
3804  ****************************************************************************/
3805 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3806                                                 struct cli_state *cli,
3807                                                 const char *domain,
3808                                                 uint32 *pneg_flags)
3809 {
3810         uint32 sec_chan_type = 0;
3811         unsigned char machine_pwd[16];
3812         const char *machine_account;
3813         NTSTATUS status;
3814
3815         /* Get the machine account credentials from secrets.tdb. */
3816         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3817                                &sec_chan_type))
3818         {
3819                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3820                         "trust account password for domain '%s'\n",
3821                         domain));
3822                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3823         }
3824
3825         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3826                                         cli->desthost, /* server name */
3827                                         domain,        /* domain */
3828                                         global_myname(), /* client name */
3829                                         machine_account, /* machine account name */
3830                                         machine_pwd,
3831                                         sec_chan_type,
3832                                         pneg_flags);
3833
3834         if (!NT_STATUS_IS_OK(status)) {
3835                 DEBUG(3, ("get_schannel_session_key_common: "
3836                           "rpccli_netlogon_setup_creds failed with result %s "
3837                           "to server %s, domain %s, machine account %s.\n",
3838                           nt_errstr(status), cli->desthost, domain,
3839                           machine_account ));
3840                 return status;
3841         }
3842
3843         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3844                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3845                         cli->desthost));
3846                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3847         }
3848
3849         return NT_STATUS_OK;
3850 }
3851
3852 /****************************************************************************
3853  Open a netlogon pipe and get the schannel session key.
3854  Now exposed to external callers.
3855  ****************************************************************************/
3856
3857
3858 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3859                                   const char *domain,
3860                                   uint32 *pneg_flags,
3861                                   struct rpc_pipe_client **presult)
3862 {
3863         struct rpc_pipe_client *netlogon_pipe = NULL;
3864         NTSTATUS status;
3865
3866         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3867                                           &netlogon_pipe);
3868         if (!NT_STATUS_IS_OK(status)) {
3869                 return status;
3870         }
3871
3872         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3873                                                  pneg_flags);
3874         if (!NT_STATUS_IS_OK(status)) {
3875                 TALLOC_FREE(netlogon_pipe);
3876                 return status;
3877         }
3878
3879         *presult = netlogon_pipe;
3880         return NT_STATUS_OK;
3881 }
3882
3883 /****************************************************************************
3884  External interface.
3885  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3886  using session_key. sign and seal.
3887  ****************************************************************************/
3888
3889 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3890                                              const struct ndr_syntax_id *interface,
3891                                              enum dcerpc_transport_t transport,
3892                                              enum pipe_auth_level auth_level,
3893                                              const char *domain,
3894                                              const struct dcinfo *pdc,
3895                                              struct rpc_pipe_client **presult)
3896 {
3897         struct rpc_pipe_client *result;
3898         struct cli_pipe_auth_data *auth;
3899         NTSTATUS status;
3900
3901         status = cli_rpc_pipe_open(cli, transport, interface, &result);
3902         if (!NT_STATUS_IS_OK(status)) {
3903                 return status;
3904         }
3905
3906         status = rpccli_schannel_bind_data(result, domain, auth_level,
3907                                            pdc->sess_key, &auth);
3908         if (!NT_STATUS_IS_OK(status)) {
3909                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3910                           nt_errstr(status)));
3911                 TALLOC_FREE(result);
3912                 return status;
3913         }
3914
3915         status = rpc_pipe_bind(result, auth);
3916         if (!NT_STATUS_IS_OK(status)) {
3917                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3918                           "cli_rpc_pipe_bind failed with error %s\n",
3919                           nt_errstr(status) ));
3920                 TALLOC_FREE(result);
3921                 return status;
3922         }
3923
3924         /*
3925          * The credentials on a new netlogon pipe are the ones we are passed
3926          * in - copy them over.
3927          */
3928         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3929         if (result->dc == NULL) {
3930                 DEBUG(0, ("talloc failed\n"));
3931                 TALLOC_FREE(result);
3932                 return NT_STATUS_NO_MEMORY;
3933         }
3934
3935         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3936                   "for domain %s and bound using schannel.\n",
3937                   get_pipe_name_from_iface(interface),
3938                   cli->desthost, domain ));
3939
3940         *presult = result;
3941         return NT_STATUS_OK;
3942 }
3943
3944 /****************************************************************************
3945  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3946  Fetch the session key ourselves using a temporary netlogon pipe. This
3947  version uses an ntlmssp auth bound netlogon pipe to get the key.
3948  ****************************************************************************/
3949
3950 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3951                                                       const char *domain,
3952                                                       const char *username,
3953                                                       const char *password,
3954                                                       uint32 *pneg_flags,
3955                                                       struct rpc_pipe_client **presult)
3956 {
3957         struct rpc_pipe_client *netlogon_pipe = NULL;
3958         NTSTATUS status;
3959
3960         status = cli_rpc_pipe_open_spnego_ntlmssp(
3961                 cli, &ndr_table_netlogon.syntax_id, NCACN_NP,
3962                 PIPE_AUTH_LEVEL_PRIVACY,
3963                 domain, username, password, &netlogon_pipe);
3964         if (!NT_STATUS_IS_OK(status)) {
3965                 return status;
3966         }
3967
3968         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3969                                                  pneg_flags);
3970         if (!NT_STATUS_IS_OK(status)) {
3971                 TALLOC_FREE(netlogon_pipe);
3972                 return status;
3973         }
3974
3975         *presult = netlogon_pipe;
3976         return NT_STATUS_OK;
3977 }
3978
3979 /****************************************************************************
3980  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3981  Fetch the session key ourselves using a temporary netlogon pipe. This version
3982  uses an ntlmssp bind to get the session key.
3983  ****************************************************************************/
3984
3985 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3986                                                  const struct ndr_syntax_id *interface,
3987                                                  enum dcerpc_transport_t transport,
3988                                                  enum pipe_auth_level auth_level,
3989                                                  const char *domain,
3990                                                  const char *username,
3991                                                  const char *password,
3992                                                  struct rpc_pipe_client **presult)
3993 {
3994         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3995         struct rpc_pipe_client *netlogon_pipe = NULL;
3996         struct rpc_pipe_client *result = NULL;
3997         NTSTATUS status;
3998
3999         status = get_schannel_session_key_auth_ntlmssp(
4000                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4001         if (!NT_STATUS_IS_OK(status)) {
4002                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4003                         "key from server %s for domain %s.\n",
4004                         cli->desthost, domain ));
4005                 return status;
4006         }
4007
4008         status = cli_rpc_pipe_open_schannel_with_key(
4009                 cli, interface, transport, auth_level, domain, netlogon_pipe->dc,
4010                 &result);
4011
4012         /* Now that we've bound using the session key we can close the netlogon pipe. */
4013         TALLOC_FREE(netlogon_pipe);
4014
4015         if (NT_STATUS_IS_OK(status)) {
4016                 *presult = result;
4017         }
4018         return status;
4019 }
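
/****************************************************************************
 Illustrative (hypothetical) caller for cli_rpc_pipe_open_ntlmssp_auth_schannel,
 kept out of the build with #if 0. It assumes "cli" is an already connected
 cli_state, and that "MYDOM", "someuser" and "secret" are placeholder
 credentials used only for the NTLMSSP bind of the temporary netlogon pipe;
 they are not part of the original code.
 ****************************************************************************/

#if 0
static NTSTATUS example_open_samr_over_schannel(struct cli_state *cli,
						struct rpc_pipe_client **samr_pipe)
{
	NTSTATUS status;

	/* Fetch the schannel session key over an NTLMSSP-authenticated
	   netlogon pipe, then bind the SAMR pipe with schannel privacy. */
	status = cli_rpc_pipe_open_ntlmssp_auth_schannel(
		cli, &ndr_table_samr.syntax_id, NCACN_NP,
		PIPE_AUTH_LEVEL_PRIVACY,
		"MYDOM", "someuser", "secret",
		samr_pipe);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("example: schannel bind failed: %s\n",
			  nt_errstr(status)));
	}
	return status;
}
#endif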
4020
4021 /****************************************************************************
4022  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4023  Fetch the session key ourselves using a temporary netlogon pipe.
4024  ****************************************************************************/
4025
4026 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4027                                     const struct ndr_syntax_id *interface,
4028                                     enum dcerpc_transport_t transport,
4029                                     enum pipe_auth_level auth_level,
4030                                     const char *domain,
4031                                     struct rpc_pipe_client **presult)
4032 {
4033         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4034         struct rpc_pipe_client *netlogon_pipe = NULL;
4035         struct rpc_pipe_client *result = NULL;
4036         NTSTATUS status;
4037
4038         *presult = NULL;
4039
4040         status = get_schannel_session_key(cli, domain, &neg_flags,
4041                                           &netlogon_pipe);
4042         if (!NT_STATUS_IS_OK(status)) {
4043                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4044                         "key from server %s for domain %s.\n",
4045                         cli->desthost, domain ));
4046                 return status;
4047         }
4048
4049         status = cli_rpc_pipe_open_schannel_with_key(
4050                 cli, interface, transport, auth_level, domain, netlogon_pipe->dc,
4051                 &result);
4052
4053         /* Now that we've bound using the session key we can close the netlogon pipe. */
4054         TALLOC_FREE(netlogon_pipe);
4055
4056         if (NT_STATUS_IS_OK(status)) {
4057                 *presult = result;
4058         }
4059
4060         return status;
4061 }
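
/****************************************************************************
 Illustrative (hypothetical) caller for cli_rpc_pipe_open_schannel, excluded
 from the build with #if 0. It assumes the machine trust account secrets are
 already available so get_schannel_session_key() can authenticate, and "MYDOM"
 is a placeholder for the real domain name.
 ****************************************************************************/

#if 0
static NTSTATUS example_open_netlogon_over_schannel(struct cli_state *cli,
						    struct rpc_pipe_client **netlogon_pipe)
{
	/* The temporary netlogon pipe used to fetch the session key is
	   created and torn down inside cli_rpc_pipe_open_schannel(). */
	return cli_rpc_pipe_open_schannel(cli,
					  &ndr_table_netlogon.syntax_id,
					  NCACN_NP,
					  PIPE_AUTH_LEVEL_PRIVACY,
					  "MYDOM",
					  netlogon_pipe);
}
#endif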
4062
4063 /****************************************************************************
4064  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4065  This can be called with service_princ, username and password all NULL,
4066  so long as the caller already holds a valid TGT.
4067  ****************************************************************************/
4068
4069 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4070                                 const struct ndr_syntax_id *interface,
4071                                 enum pipe_auth_level auth_level,
4072                                 const char *service_princ,
4073                                 const char *username,
4074                                 const char *password,
4075                                 struct rpc_pipe_client **presult)
4076 {
4077 #ifdef HAVE_KRB5
4078         struct rpc_pipe_client *result;
4079         struct cli_pipe_auth_data *auth;
4080         NTSTATUS status;
4081
4082         status = cli_rpc_pipe_open(cli, NCACN_NP, interface, &result);
4083         if (!NT_STATUS_IS_OK(status)) {
4084                 return status;
4085         }
4086
4087         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4088                                            username, password, &auth);
4089         if (!NT_STATUS_IS_OK(status)) {
4090                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4091                           nt_errstr(status)));
4092                 TALLOC_FREE(result);
4093                 return status;
4094         }
4095
4096         status = rpc_pipe_bind(result, auth);
4097         if (!NT_STATUS_IS_OK(status)) {
4098                 DEBUG(0, ("cli_rpc_pipe_open_krb5: rpc_pipe_bind failed "
4099                           "with error %s\n", nt_errstr(status)));
4100                 TALLOC_FREE(result);
4101                 return status;
4102         }
4103
4104         *presult = result;
4105         return NT_STATUS_OK;
4106 #else
4107         DEBUG(0,("cli_rpc_pipe_open_krb5: Kerberos support was not compiled in.\n"));
4108         return NT_STATUS_NOT_IMPLEMENTED;
4109 #endif
4110 }
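
/****************************************************************************
 Illustrative (hypothetical) caller for cli_rpc_pipe_open_krb5, excluded from
 the build with #if 0. Passing NULL for the service principal, username and
 password relies on the caller already holding a valid TGT (e.g. obtained with
 kinit); the LSA interface here is only an example target.
 ****************************************************************************/

#if 0
static NTSTATUS example_open_lsa_with_krb5(struct cli_state *cli,
					   struct rpc_pipe_client **lsa_pipe)
{
	/* NULL credentials: rely on the existing Kerberos ticket cache. */
	return cli_rpc_pipe_open_krb5(cli,
				      &ndr_table_lsarpc.syntax_id,
				      PIPE_AUTH_LEVEL_PRIVACY,
				      NULL,	/* service_princ */
				      NULL,	/* username */
				      NULL,	/* password */
				      lsa_pipe);
}
#endif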
4111
4112 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4113                              struct rpc_pipe_client *cli,
4114                              DATA_BLOB *session_key)
4115 {
4116         if (!session_key || !cli) {
4117                 return NT_STATUS_INVALID_PARAMETER;
4118         }
4119
4120         if (!cli->auth) {
4121                 return NT_STATUS_INVALID_PARAMETER;
4122         }
4123
4124         switch (cli->auth->auth_type) {
4125                 case PIPE_AUTH_TYPE_SCHANNEL:
4126                         *session_key = data_blob_talloc(mem_ctx,
4127                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4128                         break;
4129                 case PIPE_AUTH_TYPE_NTLMSSP:
4130                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4131                         *session_key = data_blob_talloc(mem_ctx,
4132                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4133                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4134                         break;
4135                 case PIPE_AUTH_TYPE_KRB5:
4136                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4137                         *session_key = data_blob_talloc(mem_ctx,
4138                                 cli->auth->a_u.kerberos_auth->session_key.data,
4139                                 cli->auth->a_u.kerberos_auth->session_key.length);
4140                         break;
4141                 case PIPE_AUTH_TYPE_NONE:
4142                         *session_key = data_blob_talloc(mem_ctx,
4143                                 cli->auth->user_session_key.data,
4144                                 cli->auth->user_session_key.length);
4145                         break;
4146                 default:
4147                         return NT_STATUS_NO_USER_SESSION_KEY;
4148         }
4149
4150         return NT_STATUS_OK;
4151 }
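
/****************************************************************************
 Illustrative (hypothetical) use of cli_get_session_key, excluded from the
 build with #if 0. It assumes "rpc_cli" is a pipe bound with one of the
 authenticated bind types handled above, and uses talloc_tos() as a
 convenient temporary talloc context (an assumption, not required by the
 function itself).
 ****************************************************************************/

#if 0
static void example_show_session_key_length(struct rpc_pipe_client *rpc_cli)
{
	DATA_BLOB key;
	NTSTATUS status;

	status = cli_get_session_key(talloc_tos(), rpc_cli, &key);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("example: no session key: %s\n", nt_errstr(status)));
		return;
	}

	DEBUG(10, ("example: session key is %u bytes long\n",
		   (unsigned int)key.length));

	data_blob_free(&key);
}
#endif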