2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Largely rewritten by Jeremy Allison 2005.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include "librpc/gen_ndr/cli_epmapper.h"
24 #define DBGC_CLASS DBGC_RPC_CLI
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
30 #define PIPE_SRVSVC "\\PIPE\\srvsvc"
31 #define PIPE_SAMR "\\PIPE\\samr"
32 #define PIPE_WINREG "\\PIPE\\winreg"
33 #define PIPE_WKSSVC "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS "\\PIPE\\lsass"
38 #define PIPE_LSARPC "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS "\\PIPE\\spoolss"
40 #define PIPE_NETDFS "\\PIPE\\netdfs"
41 #define PIPE_ECHO "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI "\\PIPE\\drsuapi"
50 * IMPORTANT!! If you update this structure, make sure to
51 * update the index #defines in smb.h.
54 static const struct pipe_id_info {
55 /* the names appear not to matter: the syntaxes _do_ matter */
57 const char *client_pipe;
58 const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
61 { PIPE_LSARPC, &ndr_table_lsarpc.syntax_id },
62 { PIPE_LSARPC, &ndr_table_dssetup.syntax_id },
63 { PIPE_SAMR, &ndr_table_samr.syntax_id },
64 { PIPE_NETLOGON, &ndr_table_netlogon.syntax_id },
65 { PIPE_SRVSVC, &ndr_table_srvsvc.syntax_id },
66 { PIPE_WKSSVC, &ndr_table_wkssvc.syntax_id },
67 { PIPE_WINREG, &ndr_table_winreg.syntax_id },
68 { PIPE_SPOOLSS, &syntax_spoolss },
69 { PIPE_NETDFS, &ndr_table_netdfs.syntax_id },
70 { PIPE_ECHO, &ndr_table_rpcecho.syntax_id },
71 { PIPE_SHUTDOWN, &ndr_table_initshutdown.syntax_id },
72 { PIPE_SVCCTL, &ndr_table_svcctl.syntax_id },
73 { PIPE_EVENTLOG, &ndr_table_eventlog.syntax_id },
74 { PIPE_NTSVCS, &ndr_table_ntsvcs.syntax_id },
75 { PIPE_EPMAPPER, &ndr_table_epmapper.syntax_id },
76 { PIPE_DRSUAPI, &ndr_table_drsuapi.syntax_id },
80 /****************************************************************************
81 Return the pipe name from the interface.
82 ****************************************************************************/
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85 struct cli_state *cli,
86 const struct ndr_syntax_id *interface)
89 for (i = 0; pipe_names[i].client_pipe; i++) {
90 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
92 return &pipe_names[i].client_pipe[5];
97 * Here we should ask \\epmapper, but for now our code is only
98 * interested in the known pipes mentioned in pipe_names[]
104 /********************************************************************
105 Map internal value to wire value.
106 ********************************************************************/
108 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
112 case PIPE_AUTH_TYPE_NONE:
113 return RPC_ANONYMOUS_AUTH_TYPE;
115 case PIPE_AUTH_TYPE_NTLMSSP:
116 return RPC_NTLMSSP_AUTH_TYPE;
118 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
119 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
120 return RPC_SPNEGO_AUTH_TYPE;
122 case PIPE_AUTH_TYPE_SCHANNEL:
123 return RPC_SCHANNEL_AUTH_TYPE;
125 case PIPE_AUTH_TYPE_KRB5:
126 return RPC_KRB5_AUTH_TYPE;
129 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
131 (unsigned int)auth_type ));
137 /********************************************************************
138 Pipe description for a DEBUG
139 ********************************************************************/
140 static char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli)
144 switch (cli->transport_type) {
146 result = talloc_asprintf(mem_ctx, "host %s, pipe %s, "
149 cli->trans.np.pipe_name,
150 (unsigned int)(cli->trans.np.fnum));
153 case NCACN_UNIX_STREAM:
154 result = talloc_asprintf(mem_ctx, "host %s, fd %d",
155 cli->desthost, cli->trans.sock.fd);
158 result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
161 SMB_ASSERT(result != NULL);
165 /********************************************************************
167 ********************************************************************/
169 static uint32 get_rpc_call_id(void)
171 static uint32 call_id = 0;
176 * Realloc pdu to have a least "size" bytes
179 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
183 if (prs_data_size(pdu) >= size) {
187 extra_size = size - prs_data_size(pdu);
189 if (!prs_force_grow(pdu, extra_size)) {
190 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
191 "%d bytes.\n", (int)extra_size));
195 DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
196 (int)extra_size, prs_data_size(pdu)));
201 /*******************************************************************
202 Use SMBreadX to get rest of one fragment's worth of rpc data.
203 Reads the whole size or give an error message
204 ********************************************************************/
206 struct rpc_read_state {
207 struct event_context *ev;
208 struct rpc_pipe_client *cli;
214 static void rpc_read_np_done(struct async_req *subreq);
215 static void rpc_read_sock_done(struct async_req *subreq);
217 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
218 struct event_context *ev,
219 struct rpc_pipe_client *cli,
220 char *data, size_t size)
222 struct async_req *result, *subreq;
223 struct rpc_read_state *state;
225 if (!async_req_setup(mem_ctx, &result, &state,
226 struct rpc_read_state)) {
235 DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
237 if (cli->transport_type == NCACN_NP) {
238 subreq = cli_read_andx_send(
239 state, ev, cli->trans.np.cli,
240 cli->trans.np.fnum, 0, size);
241 if (subreq == NULL) {
242 DEBUG(10, ("cli_read_andx_send failed\n"));
245 subreq->async.fn = rpc_read_np_done;
246 subreq->async.priv = result;
250 if ((cli->transport_type == NCACN_IP_TCP)
251 || (cli->transport_type == NCACN_UNIX_STREAM)) {
252 subreq = recvall_send(state, ev, cli->trans.sock.fd,
254 if (subreq == NULL) {
255 DEBUG(10, ("recvall_send failed\n"));
258 subreq->async.fn = rpc_read_sock_done;
259 subreq->async.priv = result;
263 if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
271 static void rpc_read_np_done(struct async_req *subreq)
273 struct async_req *req = talloc_get_type_abort(
274 subreq->async.priv, struct async_req);
275 struct rpc_read_state *state = talloc_get_type_abort(
276 req->private_data, struct rpc_read_state);
281 status = cli_read_andx_recv(subreq, &received, &rcvbuf);
283 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
286 if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
287 status = NT_STATUS_OK;
289 if (!NT_STATUS_IS_OK(status)) {
291 async_req_error(req, status);
295 memcpy(state->data + state->num_read, rcvbuf, received);
298 state->num_read += received;
300 if (state->num_read == state->size) {
305 subreq = cli_read_andx_send(
306 state, state->ev, state->cli->trans.np.cli,
307 state->cli->trans.np.fnum, 0,
308 state->size - state->num_read);
310 if (async_req_nomem(subreq, req)) {
314 subreq->async.fn = rpc_read_np_done;
315 subreq->async.priv = req;
318 static void rpc_read_sock_done(struct async_req *subreq)
320 struct async_req *req = talloc_get_type_abort(
321 subreq->async.priv, struct async_req);
324 status = recvall_recv(subreq);
326 if (!NT_STATUS_IS_OK(status)) {
327 async_req_error(req, status);
334 static NTSTATUS rpc_read_recv(struct async_req *req)
336 return async_req_simple_recv(req);
339 struct rpc_write_state {
340 struct event_context *ev;
341 struct rpc_pipe_client *cli;
347 static void rpc_write_np_done(struct async_req *subreq);
348 static void rpc_write_sock_done(struct async_req *subreq);
350 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
351 struct event_context *ev,
352 struct rpc_pipe_client *cli,
353 const char *data, size_t size)
355 struct async_req *result, *subreq;
356 struct rpc_write_state *state;
358 if (!async_req_setup(mem_ctx, &result, &state,
359 struct rpc_write_state)) {
366 state->num_written = 0;
368 DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
370 if (cli->transport_type == NCACN_NP) {
371 subreq = cli_write_andx_send(
372 state, ev, cli->trans.np.cli,
373 cli->trans.np.fnum, 8, /* 8 means message mode. */
374 (uint8_t *)data, 0, size);
375 if (subreq == NULL) {
376 DEBUG(10, ("cli_write_andx_send failed\n"));
379 subreq->async.fn = rpc_write_np_done;
380 subreq->async.priv = result;
384 if ((cli->transport_type == NCACN_IP_TCP)
385 || (cli->transport_type == NCACN_UNIX_STREAM)) {
386 subreq = sendall_send(state, ev, cli->trans.sock.fd,
388 if (subreq == NULL) {
389 DEBUG(10, ("sendall_send failed\n"));
392 subreq->async.fn = rpc_write_sock_done;
393 subreq->async.priv = result;
397 if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
405 static void rpc_write_np_done(struct async_req *subreq)
407 struct async_req *req = talloc_get_type_abort(
408 subreq->async.priv, struct async_req);
409 struct rpc_write_state *state = talloc_get_type_abort(
410 req->private_data, struct rpc_write_state);
414 status = cli_write_andx_recv(subreq, &written);
416 if (!NT_STATUS_IS_OK(status)) {
417 async_req_error(req, status);
421 state->num_written += written;
423 if (state->num_written == state->size) {
428 subreq = cli_write_andx_send(
429 state, state->ev, state->cli->trans.np.cli,
430 state->cli->trans.np.fnum, 8,
431 (uint8_t *)(state->data + state->num_written),
432 0, state->size - state->num_written);
434 if (async_req_nomem(subreq, req)) {
438 subreq->async.fn = rpc_write_np_done;
439 subreq->async.priv = req;
442 static void rpc_write_sock_done(struct async_req *subreq)
444 struct async_req *req = talloc_get_type_abort(
445 subreq->async.priv, struct async_req);
448 status = sendall_recv(subreq);
450 if (!NT_STATUS_IS_OK(status)) {
451 async_req_error(req, status);
458 static NTSTATUS rpc_write_recv(struct async_req *req)
460 return async_req_simple_recv(req);
464 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
465 struct rpc_hdr_info *prhdr,
469 * This next call sets the endian bit correctly in current_pdu. We
470 * will propagate this to rbuf later.
473 if(!smb_io_rpc_hdr("rpc_hdr ", prhdr, pdu, 0)) {
474 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
475 return NT_STATUS_BUFFER_TOO_SMALL;
478 if (prhdr->frag_len > cli->max_recv_frag) {
479 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
480 " we only allow %d\n", (int)prhdr->frag_len,
481 (int)cli->max_recv_frag));
482 return NT_STATUS_BUFFER_TOO_SMALL;
488 /****************************************************************************
489 Try and get a PDU's worth of data from current_pdu. If not, then read more
491 ****************************************************************************/
493 struct get_complete_frag_state {
494 struct event_context *ev;
495 struct rpc_pipe_client *cli;
496 struct rpc_hdr_info *prhdr;
500 static void get_complete_frag_got_header(struct async_req *subreq);
501 static void get_complete_frag_got_rest(struct async_req *subreq);
503 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
504 struct event_context *ev,
505 struct rpc_pipe_client *cli,
506 struct rpc_hdr_info *prhdr,
509 struct async_req *result, *subreq;
510 struct get_complete_frag_state *state;
514 if (!async_req_setup(mem_ctx, &result, &state,
515 struct get_complete_frag_state)) {
520 state->prhdr = prhdr;
523 pdu_len = prs_data_size(pdu);
524 if (pdu_len < RPC_HEADER_LEN) {
525 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
526 status = NT_STATUS_NO_MEMORY;
529 subreq = rpc_read_send(state, state->ev, state->cli,
530 prs_data_p(state->pdu) + pdu_len,
531 RPC_HEADER_LEN - pdu_len);
532 if (subreq == NULL) {
533 status = NT_STATUS_NO_MEMORY;
536 subreq->async.fn = get_complete_frag_got_header;
537 subreq->async.priv = result;
541 status = parse_rpc_header(cli, prhdr, pdu);
542 if (!NT_STATUS_IS_OK(status)) {
547 * Ensure we have frag_len bytes of data.
549 if (pdu_len < prhdr->frag_len) {
550 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
551 status = NT_STATUS_NO_MEMORY;
554 subreq = rpc_read_send(state, state->ev, state->cli,
555 prs_data_p(pdu) + pdu_len,
556 prhdr->frag_len - pdu_len);
557 if (subreq == NULL) {
558 status = NT_STATUS_NO_MEMORY;
561 subreq->async.fn = get_complete_frag_got_rest;
562 subreq->async.priv = result;
566 status = NT_STATUS_OK;
568 if (async_post_status(result, ev, status)) {
575 static void get_complete_frag_got_header(struct async_req *subreq)
577 struct async_req *req = talloc_get_type_abort(
578 subreq->async.priv, struct async_req);
579 struct get_complete_frag_state *state = talloc_get_type_abort(
580 req->private_data, struct get_complete_frag_state);
583 status = rpc_read_recv(subreq);
585 if (!NT_STATUS_IS_OK(status)) {
586 async_req_error(req, status);
590 status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
591 if (!NT_STATUS_IS_OK(status)) {
592 async_req_error(req, status);
596 if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
597 async_req_error(req, NT_STATUS_NO_MEMORY);
602 * We're here in this piece of code because we've read exactly
603 * RPC_HEADER_LEN bytes into state->pdu.
606 subreq = rpc_read_send(state, state->ev, state->cli,
607 prs_data_p(state->pdu) + RPC_HEADER_LEN,
608 state->prhdr->frag_len - RPC_HEADER_LEN);
609 if (async_req_nomem(subreq, req)) {
612 subreq->async.fn = get_complete_frag_got_rest;
613 subreq->async.priv = req;
616 static void get_complete_frag_got_rest(struct async_req *subreq)
618 struct async_req *req = talloc_get_type_abort(
619 subreq->async.priv, struct async_req);
622 status = rpc_read_recv(subreq);
624 if (!NT_STATUS_IS_OK(status)) {
625 async_req_error(req, status);
631 static NTSTATUS get_complete_frag_recv(struct async_req *req)
633 return async_req_simple_recv(req);
636 /****************************************************************************
637 NTLMSSP specific sign/seal.
638 Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
639 In fact I should probably abstract these into identical pieces of code... JRA.
640 ****************************************************************************/
642 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
643 prs_struct *current_pdu,
644 uint8 *p_ss_padding_len)
646 RPC_HDR_AUTH auth_info;
647 uint32 save_offset = prs_offset(current_pdu);
648 uint32 auth_len = prhdr->auth_len;
649 NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
650 unsigned char *data = NULL;
652 unsigned char *full_packet_data = NULL;
653 size_t full_packet_data_len;
657 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
658 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
662 if (!ntlmssp_state) {
663 return NT_STATUS_INVALID_PARAMETER;
666 /* Ensure there's enough data for an authenticated response. */
667 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
668 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
669 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
670 (unsigned int)auth_len ));
671 return NT_STATUS_BUFFER_TOO_SMALL;
675 * We need the full packet data + length (minus auth stuff) as well as the packet data + length
676 * after the RPC header.
677 * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
678 * functions as NTLMv2 checks the rpc headers also.
681 data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
682 data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
684 full_packet_data = (unsigned char *)prs_data_p(current_pdu);
685 full_packet_data_len = prhdr->frag_len - auth_len;
687 /* Pull the auth header and the following data into a blob. */
688 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
689 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
690 (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
691 return NT_STATUS_BUFFER_TOO_SMALL;
694 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
695 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
696 return NT_STATUS_BUFFER_TOO_SMALL;
699 auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
700 auth_blob.length = auth_len;
702 switch (cli->auth->auth_level) {
703 case PIPE_AUTH_LEVEL_PRIVACY:
704 /* Data is encrypted. */
705 status = ntlmssp_unseal_packet(ntlmssp_state,
708 full_packet_data_len,
710 if (!NT_STATUS_IS_OK(status)) {
711 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
712 "packet from %s. Error was %s.\n",
713 rpccli_pipe_txt(debug_ctx(), cli),
714 nt_errstr(status) ));
718 case PIPE_AUTH_LEVEL_INTEGRITY:
719 /* Data is signed. */
720 status = ntlmssp_check_packet(ntlmssp_state,
723 full_packet_data_len,
725 if (!NT_STATUS_IS_OK(status)) {
726 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
727 "packet from %s. Error was %s.\n",
728 rpccli_pipe_txt(debug_ctx(), cli),
729 nt_errstr(status) ));
734 DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
735 "auth level %d\n", cli->auth->auth_level));
736 return NT_STATUS_INVALID_INFO_CLASS;
740 * Return the current pointer to the data offset.
743 if(!prs_set_offset(current_pdu, save_offset)) {
744 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
745 (unsigned int)save_offset ));
746 return NT_STATUS_BUFFER_TOO_SMALL;
750 * Remember the padding length. We must remove it from the real data
751 * stream once the sign/seal is done.
754 *p_ss_padding_len = auth_info.auth_pad_len;
759 /****************************************************************************
760 schannel specific sign/seal.
761 ****************************************************************************/
763 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
764 prs_struct *current_pdu,
765 uint8 *p_ss_padding_len)
767 RPC_HDR_AUTH auth_info;
768 RPC_AUTH_SCHANNEL_CHK schannel_chk;
769 uint32 auth_len = prhdr->auth_len;
770 uint32 save_offset = prs_offset(current_pdu);
771 struct schannel_auth_struct *schannel_auth =
772 cli->auth->a_u.schannel_auth;
775 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
776 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
780 if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
781 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
782 return NT_STATUS_INVALID_PARAMETER;
785 if (!schannel_auth) {
786 return NT_STATUS_INVALID_PARAMETER;
789 /* Ensure there's enough data for an authenticated response. */
790 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
791 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
792 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
793 (unsigned int)auth_len ));
794 return NT_STATUS_INVALID_PARAMETER;
797 data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
799 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
800 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
801 (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
802 return NT_STATUS_BUFFER_TOO_SMALL;
805 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
806 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
807 return NT_STATUS_BUFFER_TOO_SMALL;
810 if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
811 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
812 auth_info.auth_type));
813 return NT_STATUS_BUFFER_TOO_SMALL;
816 if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
817 &schannel_chk, current_pdu, 0)) {
818 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
819 return NT_STATUS_BUFFER_TOO_SMALL;
822 if (!schannel_decode(schannel_auth,
823 cli->auth->auth_level,
826 prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
828 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
829 "Connection to %s.\n",
830 rpccli_pipe_txt(debug_ctx(), cli)));
831 return NT_STATUS_INVALID_PARAMETER;
834 /* The sequence number gets incremented on both send and receive. */
835 schannel_auth->seq_num++;
838 * Return the current pointer to the data offset.
841 if(!prs_set_offset(current_pdu, save_offset)) {
842 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
843 (unsigned int)save_offset ));
844 return NT_STATUS_BUFFER_TOO_SMALL;
848 * Remember the padding length. We must remove it from the real data
849 * stream once the sign/seal is done.
852 *p_ss_padding_len = auth_info.auth_pad_len;
857 /****************************************************************************
858 Do the authentication checks on an incoming pdu. Check sign and unseal etc.
859 ****************************************************************************/
861 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
862 prs_struct *current_pdu,
863 uint8 *p_ss_padding_len)
865 NTSTATUS ret = NT_STATUS_OK;
867 /* Paranioa checks for auth_len. */
868 if (prhdr->auth_len) {
869 if (prhdr->auth_len > prhdr->frag_len) {
870 return NT_STATUS_INVALID_PARAMETER;
873 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
874 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
875 /* Integer wrap attempt. */
876 return NT_STATUS_INVALID_PARAMETER;
881 * Now we have a complete RPC request PDU fragment, try and verify any auth data.
884 switch(cli->auth->auth_type) {
885 case PIPE_AUTH_TYPE_NONE:
886 if (prhdr->auth_len) {
887 DEBUG(3, ("cli_pipe_validate_rpc_response: "
888 "Connection to %s - got non-zero "
890 rpccli_pipe_txt(debug_ctx(), cli),
891 (unsigned int)prhdr->auth_len ));
892 return NT_STATUS_INVALID_PARAMETER;
896 case PIPE_AUTH_TYPE_NTLMSSP:
897 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
898 ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
899 if (!NT_STATUS_IS_OK(ret)) {
904 case PIPE_AUTH_TYPE_SCHANNEL:
905 ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
906 if (!NT_STATUS_IS_OK(ret)) {
911 case PIPE_AUTH_TYPE_KRB5:
912 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
914 DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
915 "to %s - unknown internal auth type %u.\n",
916 rpccli_pipe_txt(debug_ctx(), cli),
917 cli->auth->auth_type ));
918 return NT_STATUS_INVALID_INFO_CLASS;
924 /****************************************************************************
925 Do basic authentication checks on an incoming pdu.
926 ****************************************************************************/
928 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
929 prs_struct *current_pdu,
930 uint8 expected_pkt_type,
933 prs_struct *return_data)
936 NTSTATUS ret = NT_STATUS_OK;
937 uint32 current_pdu_len = prs_data_size(current_pdu);
939 if (current_pdu_len != prhdr->frag_len) {
940 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
941 (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
942 return NT_STATUS_INVALID_PARAMETER;
946 * Point the return values at the real data including the RPC
947 * header. Just in case the caller wants it.
949 *ppdata = prs_data_p(current_pdu);
950 *pdata_len = current_pdu_len;
952 /* Ensure we have the correct type. */
953 switch (prhdr->pkt_type) {
954 case RPC_ALTCONTRESP:
957 /* Alter context and bind ack share the same packet definitions. */
963 RPC_HDR_RESP rhdr_resp;
964 uint8 ss_padding_len = 0;
966 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
967 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
968 return NT_STATUS_BUFFER_TOO_SMALL;
971 /* Here's where we deal with incoming sign/seal. */
972 ret = cli_pipe_validate_rpc_response(cli, prhdr,
973 current_pdu, &ss_padding_len);
974 if (!NT_STATUS_IS_OK(ret)) {
978 /* Point the return values at the NDR data. Remember to remove any ss padding. */
979 *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
981 if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
982 return NT_STATUS_BUFFER_TOO_SMALL;
985 *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
987 /* Remember to remove the auth footer. */
988 if (prhdr->auth_len) {
989 /* We've already done integer wrap tests on auth_len in
990 cli_pipe_validate_rpc_response(). */
991 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
992 return NT_STATUS_BUFFER_TOO_SMALL;
994 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
997 DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
998 current_pdu_len, *pdata_len, ss_padding_len ));
1001 * If this is the first reply, and the allocation hint is reasonably, try and
1002 * set up the return_data parse_struct to the correct size.
1005 if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
1006 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
1007 DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
1008 "too large to allocate\n",
1009 (unsigned int)rhdr_resp.alloc_hint ));
1010 return NT_STATUS_NO_MEMORY;
1018 DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
1019 "received from %s!\n",
1020 rpccli_pipe_txt(debug_ctx(), cli)));
1021 /* Use this for now... */
1022 return NT_STATUS_NETWORK_ACCESS_DENIED;
1026 RPC_HDR_RESP rhdr_resp;
1027 RPC_HDR_FAULT fault_resp;
1029 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
1030 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
1031 return NT_STATUS_BUFFER_TOO_SMALL;
1034 if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
1035 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
1036 return NT_STATUS_BUFFER_TOO_SMALL;
1039 DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
1040 "code %s received from %s!\n",
1041 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
1042 rpccli_pipe_txt(debug_ctx(), cli)));
1043 if (NT_STATUS_IS_OK(fault_resp.status)) {
1044 return NT_STATUS_UNSUCCESSFUL;
1046 return fault_resp.status;
1051 DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
1053 (unsigned int)prhdr->pkt_type,
1054 rpccli_pipe_txt(debug_ctx(), cli)));
1055 return NT_STATUS_INVALID_INFO_CLASS;
1058 if (prhdr->pkt_type != expected_pkt_type) {
1059 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
1060 "got an unexpected RPC packet type - %u, not %u\n",
1061 rpccli_pipe_txt(debug_ctx(), cli),
1063 expected_pkt_type));
1064 return NT_STATUS_INVALID_INFO_CLASS;
1067 /* Do this just before return - we don't want to modify any rpc header
1068 data before now as we may have needed to do cryptographic actions on
1071 if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
1072 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
1073 "setting fragment first/last ON.\n"));
1074 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
1077 return NT_STATUS_OK;
1080 /****************************************************************************
1081 Ensure we eat the just processed pdu from the current_pdu prs_struct.
1082 Normally the frag_len and buffer size will match, but on the first trans
1083 reply there is a theoretical chance that buffer size > frag_len, so we must
1085 ****************************************************************************/
1087 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
1089 uint32 current_pdu_len = prs_data_size(current_pdu);
1091 if (current_pdu_len < prhdr->frag_len) {
1092 return NT_STATUS_BUFFER_TOO_SMALL;
1096 if (current_pdu_len == (uint32)prhdr->frag_len) {
1097 prs_mem_free(current_pdu);
1098 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1099 /* Make current_pdu dynamic with no memory. */
1100 prs_give_memory(current_pdu, 0, 0, True);
1101 return NT_STATUS_OK;
1105 * Oh no ! More data in buffer than we processed in current pdu.
1106 * Cheat. Move the data down and shrink the buffer.
1109 memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1110 current_pdu_len - prhdr->frag_len);
1112 /* Remember to set the read offset back to zero. */
1113 prs_set_offset(current_pdu, 0);
1115 /* Shrink the buffer. */
1116 if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1117 return NT_STATUS_BUFFER_TOO_SMALL;
1120 return NT_STATUS_OK;
1123 /****************************************************************************
1124 Call a remote api on an arbitrary pipe. takes param, data and setup buffers.
1125 ****************************************************************************/
1127 struct cli_api_pipe_state {
1128 struct event_context *ev;
1129 struct rpc_pipe_client *cli;
1130 uint32_t max_rdata_len;
1135 static void cli_api_pipe_np_trans_done(struct async_req *subreq);
1136 static void cli_api_pipe_sock_send_done(struct async_req *subreq);
1137 static void cli_api_pipe_sock_read_done(struct async_req *subreq);
1139 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1140 struct event_context *ev,
1141 struct rpc_pipe_client *cli,
1142 uint8_t *data, size_t data_len,
1143 uint32_t max_rdata_len)
1145 struct async_req *result, *subreq;
1146 struct cli_api_pipe_state *state;
1149 if (!async_req_setup(mem_ctx, &result, &state,
1150 struct cli_api_pipe_state)) {
1155 state->max_rdata_len = max_rdata_len;
1157 if (state->max_rdata_len < RPC_HEADER_LEN) {
1159 * For a RPC reply we always need at least RPC_HEADER_LEN
1160 * bytes. We check this here because we will receive
1161 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1163 status = NT_STATUS_INVALID_PARAMETER;
1167 if (cli->transport_type == NCACN_NP) {
1170 SSVAL(setup+0, 0, TRANSACT_DCERPCCMD);
1171 SSVAL(setup+1, 0, cli->trans.np.fnum);
1173 subreq = cli_trans_send(
1174 state, ev, cli->trans.np.cli, SMBtrans,
1175 "\\PIPE\\", 0, 0, 0, setup, 2, 0,
1176 NULL, 0, 0, data, data_len, max_rdata_len);
1177 if (subreq == NULL) {
1178 status = NT_STATUS_NO_MEMORY;
1181 subreq->async.fn = cli_api_pipe_np_trans_done;
1182 subreq->async.priv = result;
1186 if ((cli->transport_type == NCACN_IP_TCP)
1187 || (cli->transport_type == NCACN_UNIX_STREAM)) {
1188 subreq = sendall_send(state, ev, cli->trans.sock.fd,
1190 if (subreq == NULL) {
1191 status = NT_STATUS_NO_MEMORY;
1194 subreq->async.fn = cli_api_pipe_sock_send_done;
1195 subreq->async.priv = result;
1199 status = NT_STATUS_INVALID_PARAMETER;
1202 if (async_post_status(result, ev, status)) {
1205 TALLOC_FREE(result);
/*
 * Completion callback for the SMBtrans (named pipe) path: collect
 * the reply bytes into state->rdata/rdata_len and finish the
 * parent request.
 */
1209 static void cli_api_pipe_np_trans_done(struct async_req *subreq)
1211 	struct async_req *req = talloc_get_type_abort(
1212 		subreq->async.priv, struct async_req);
1213 	struct cli_api_pipe_state *state = talloc_get_type_abort(
1214 		req->private_data, struct cli_api_pipe_state);
	/* Pull the reply data out of the trans call, owned by state. */
1217 	status = cli_trans_recv(subreq, state, NULL, NULL, NULL, NULL,
1218 				&state->rdata, &state->rdata_len);
1219 	TALLOC_FREE(subreq);
1220 	if (!NT_STATUS_IS_OK(status)) {
1221 		async_req_error(req, status);
1224 	async_req_done(req);
/*
 * Completion callback for the socket write: the request PDU is on
 * the wire, so start reading exactly RPC_HEADER_LEN bytes of the
 * reply (the caller re-reads the rest once the fragment length is
 * known).
 */
1227 static void cli_api_pipe_sock_send_done(struct async_req *subreq)
1229 	struct async_req *req = talloc_get_type_abort(
1230 		subreq->async.priv, struct async_req);
1231 	struct cli_api_pipe_state *state = talloc_get_type_abort(
1232 		req->private_data, struct cli_api_pipe_state);
1235 	status = sendall_recv(subreq);
1236 	TALLOC_FREE(subreq);
1237 	if (!NT_STATUS_IS_OK(status)) {
1238 		async_req_error(req, status);
	/* Buffer for the fixed-size RPC header of the reply. */
1242 	state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1243 	if (async_req_nomem(state->rdata, req)) {
1246 	state->rdata_len = RPC_HEADER_LEN;
1248 	subreq = recvall_send(state, state->ev, state->cli->trans.sock.fd,
1249 			      state->rdata, RPC_HEADER_LEN, 0);
1250 	if (async_req_nomem(subreq, req)) {
1253 	subreq->async.fn = cli_api_pipe_sock_read_done;
1254 	subreq->async.priv = req;
/*
 * Completion callback for the socket read of the reply header:
 * state->rdata already points at the received bytes, so just
 * propagate status and complete the parent request.
 */
1257 static void cli_api_pipe_sock_read_done(struct async_req *subreq)
1259 	struct async_req *req = talloc_get_type_abort(
1260 		subreq->async.priv, struct async_req);
1263 	status = recvall_recv(subreq);
1264 	TALLOC_FREE(subreq);
1265 	if (!NT_STATUS_IS_OK(status)) {
1266 		async_req_error(req, status);
1269 	async_req_done(req);
/*
 * Receive side of cli_api_pipe_send(): on success hands ownership
 * of the reply buffer to mem_ctx via talloc_move and returns its
 * length in *prdata_len.
 */
1272 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1273 				  uint8_t **prdata, uint32_t *prdata_len)
1275 	struct cli_api_pipe_state *state = talloc_get_type_abort(
1276 		req->private_data, struct cli_api_pipe_state);
1279 	if (async_req_is_error(req, &status)) {
	/* Transfer the reply buffer to the caller's context. */
1283 	*prdata = talloc_move(mem_ctx, &state->rdata);
1284 	*prdata_len = state->rdata_len;
1285 	return NT_STATUS_OK;
1288 /****************************************************************************
1289 Send data on an rpc pipe via trans. The prs_struct data must be the last
1290 pdu fragment of an NDR data stream.
1292 Receive response data from an rpc pipe, which may be large...
1294 Read the first fragment: unfortunately have to use SMBtrans for the first
1295 bit, then SMBreadX for subsequent bits.
1297 If first fragment received also wasn't the last fragment, continue
1298 getting fragments until we _do_ receive the last fragment.
1300 Request/Response PDUs look like the following...
1302 |<------------------PDU len----------------------------------------------->|
1303 |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1305 +------------+-----------------+-------------+---------------+-------------+
1306 | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
1307 +------------+-----------------+-------------+---------------+-------------+
1309 Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1310 signing & sealing being negotiated.
1312 ****************************************************************************/
/*
 * State for rpc_api_pipe_send/recv: sends one outgoing PDU and
 * reassembles a (possibly multi-fragment) reply into incoming_pdu.
 */
1314 struct rpc_api_pipe_state {
1315 	struct event_context *ev;
1316 	struct rpc_pipe_client *cli;
	/* RPC packet type we expect in the reply (e.g. response). */
1317 	uint8_t expected_pkt_type;
	/* Buffer for the fragment currently being received. */
1319 	prs_struct incoming_frag;
	/* Parsed RPC header of the current fragment. */
1320 	struct rpc_hdr_info rhdr;
1322 	prs_struct incoming_pdu;	/* Incoming reply */
	/* Write offset into incoming_pdu for the next fragment's data. */
1323 	uint32_t incoming_pdu_offset;
/*
 * talloc destructor: release the prs buffers, which are not
 * themselves talloc children of state.
 */
1326 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1328 	prs_mem_free(&state->incoming_frag);
1329 	prs_mem_free(&state->incoming_pdu);
1333 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1334 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
/*
 * Send one marshalled PDU ("data", up to prs_offset bytes) and
 * asynchronously collect the complete, possibly fragmented reply.
 * The reply is validated against expected_pkt_type as fragments
 * arrive.  Completed via rpc_api_pipe_recv().
 */
1336 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1337 struct event_context *ev,
1338 struct rpc_pipe_client *cli,
1339 prs_struct *data, /* Outgoing PDU */
1340 uint8_t expected_pkt_type)
1342 	struct async_req *result, *subreq;
1343 	struct rpc_api_pipe_state *state;
1344 	uint16_t max_recv_frag;
1347 	if (!async_req_setup(mem_ctx, &result, &state,
1348 			     struct rpc_api_pipe_state)) {
1353 	state->expected_pkt_type = expected_pkt_type;
1354 	state->incoming_pdu_offset = 0;
1356 	prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1358 	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1359 	/* Make incoming_pdu dynamic with no memory. */
1360 	prs_give_memory(&state->incoming_pdu, 0, 0, true);
1362 	talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1365 	 * Ensure we're not sending too much.
1367 	if (prs_offset(data) > cli->max_xmit_frag) {
1368 		status = NT_STATUS_INVALID_PARAMETER;
1372 	DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1374 	max_recv_frag = cli->max_recv_frag;
	/* NOTE(review): a deliberately small random receive size —
	 * presumably a developer/test path exercising fragment
	 * reassembly; the guarding condition is elided here. */
1377 	max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1380 	subreq = cli_api_pipe_send(state, ev, cli, (uint8_t *)prs_data_p(data),
1381 				   prs_offset(data), max_recv_frag);
1382 	if (subreq == NULL) {
1383 		status = NT_STATUS_NO_MEMORY;
1386 	subreq->async.fn = rpc_api_pipe_trans_done;
1387 	subreq->async.priv = result;
	/* Error path: deliver "status" through the event loop. */
1391 	if (async_post_status(result, ev, status)) {
1394 	TALLOC_FREE(result);
/*
 * First-chunk callback: take the initial reply bytes from
 * cli_api_pipe, seed incoming_frag with them, then ask
 * get_complete_frag to read until a whole fragment is buffered.
 */
1398 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1400 	struct async_req *req = talloc_get_type_abort(
1401 		subreq->async.priv, struct async_req);
1402 	struct rpc_api_pipe_state *state = talloc_get_type_abort(
1403 		req->private_data, struct rpc_api_pipe_state);
1405 	uint8_t *rdata = NULL;
1406 	uint32_t rdata_len = 0;
1409 	status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1410 	TALLOC_FREE(subreq);
1411 	if (!NT_STATUS_IS_OK(status)) {
1412 		DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1413 		async_req_error(req, status);
	/* An empty reply is treated as success with no data. */
1417 	if (rdata == NULL) {
1418 		DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1419 			 rpccli_pipe_txt(debug_ctx(), state->cli)));
1420 		async_req_done(req);
1425 	 * Give the memory received from cli_trans as dynamic to the current
1426 	 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1429 	rdata_copy = (char *)memdup(rdata, rdata_len);
1431 	if (async_req_nomem(rdata_copy, req)) {
1434 	prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1436 	/* Ensure we have enough data for a pdu. */
1437 	subreq = get_complete_frag_send(state, state->ev, state->cli,
1438 					&state->rhdr, &state->incoming_frag);
1439 	if (async_req_nomem(subreq, req)) {
1442 	subreq->async.fn = rpc_api_pipe_got_pdu;
1443 	subreq->async.priv = req;
/*
 * Per-fragment callback: validate the buffered fragment, append
 * its data portion to incoming_pdu, and either finish (RPC_FLG_LAST)
 * or loop by requesting the next fragment.  Also pins the reply's
 * endianness on the first fragment and rejects mid-stream changes.
 */
1446 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1448 	struct async_req *req = talloc_get_type_abort(
1449 		subreq->async.priv, struct async_req);
1450 	struct rpc_api_pipe_state *state = talloc_get_type_abort(
1451 		req->private_data, struct rpc_api_pipe_state);
1454 	uint32_t rdata_len = 0;
1456 	status = get_complete_frag_recv(subreq);
1457 	TALLOC_FREE(subreq);
1458 	if (!NT_STATUS_IS_OK(status)) {
1459 		DEBUG(5, ("get_complete_frag failed: %s\n",
1460 			  nt_errstr(status)));
1461 		async_req_error(req, status);
	/* Check header, auth and packet type; extract the data portion. */
1465 	status = cli_pipe_validate_current_pdu(
1466 		state->cli, &state->rhdr, &state->incoming_frag,
1467 		state->expected_pkt_type, &rdata, &rdata_len,
1468 		&state->incoming_pdu);
1470 	DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1471 		  (unsigned)prs_data_size(&state->incoming_frag),
1472 		  (unsigned)state->incoming_pdu_offset,
1473 		  nt_errstr(status)));
1475 	if (!NT_STATUS_IS_OK(status)) {
1476 		async_req_error(req, status);
	/* First fragment: pack_type[0] == 0 means big-endian data. */
1480 	if ((state->rhdr.flags & RPC_FLG_FIRST)
1481 	    && (state->rhdr.pack_type[0] == 0)) {
1483 		 * Set the data type correctly for big-endian data on the
1486 		DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1488 			  rpccli_pipe_txt(debug_ctx(), state->cli)));
1489 		prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1492 	 * Check endianness on subsequent packets.
1494 	if (state->incoming_frag.bigendian_data
1495 	    != state->incoming_pdu.bigendian_data) {
1496 		DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1498 			 state->incoming_pdu.bigendian_data?"big":"little",
1499 			 state->incoming_frag.bigendian_data?"big":"little"));
1500 		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
1504 	/* Now copy the data portion out of the pdu into rbuf. */
1505 	if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1506 		async_req_error(req, NT_STATUS_NO_MEMORY);
1510 	memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1511 	       rdata, (size_t)rdata_len);
1512 	state->incoming_pdu_offset += rdata_len;
	/* Reset the fragment buffer ready for the next fragment. */
1514 	status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1515 					    &state->incoming_frag);
1516 	if (!NT_STATUS_IS_OK(status)) {
1517 		async_req_error(req, status);
	/* Last fragment -> the whole PDU has been reassembled. */
1521 	if (state->rhdr.flags & RPC_FLG_LAST) {
1522 		DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1523 			  rpccli_pipe_txt(debug_ctx(), state->cli),
1524 			  (unsigned)prs_data_size(&state->incoming_pdu)));
1525 		async_req_done(req);
	/* More fragments pending: go round again. */
1529 	subreq = get_complete_frag_send(state, state->ev, state->cli,
1530 					&state->rhdr, &state->incoming_frag);
1531 	if (async_req_nomem(subreq, req)) {
1534 	subreq->async.fn = rpc_api_pipe_got_pdu;
1535 	subreq->async.priv = req;
/*
 * Receive side of rpc_api_pipe_send(): hand the reassembled reply
 * PDU to the caller by structure copy, re-parenting its memory to
 * mem_ctx, then reinitialise state->incoming_pdu so the destructor
 * does not free the buffer we just gave away.
 */
1538 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1539 				  prs_struct *reply_pdu)
1541 	struct rpc_api_pipe_state *state = talloc_get_type_abort(
1542 		req->private_data, struct rpc_api_pipe_state);
1545 	if (async_req_is_error(req, &status)) {
1549 	*reply_pdu = state->incoming_pdu;
1550 	reply_pdu->mem_ctx = mem_ctx;
1553 	 * Prevent state->incoming_pdu from being freed in
1554 	 * rpc_api_pipe_state_destructor()
1556 	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1558 	return NT_STATUS_OK;
1561 /*******************************************************************
1562 Creates krb5 auth bind.
1563 ********************************************************************/
/*
 * Build the KRB5 auth trailer for a bind request: fetch a service
 * ticket for the stored principal, wrap it in a GSS-API/SPNEGO krb5
 * blob and marshall it into auth_data.  pauth_out is initialised
 * with RPC_KRB5_AUTH_TYPE; its pad length may be adjusted later.
 * On failure auth_data is freed before returning.
 */
1565 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1566 					enum pipe_auth_level auth_level,
1567 					RPC_HDR_AUTH *pauth_out,
1568 					prs_struct *auth_data)
1572 	struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1573 	DATA_BLOB tkt = data_blob_null;
1574 	DATA_BLOB tkt_wrapped = data_blob_null;
1576 	/* We may change the pad length before marshalling. */
1577 	init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1579 	DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1580 		a->service_principal ));
1582 	/* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1584 	ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1585 			&a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
	/* Non-zero "ret" is a krb5 error code, not an NTSTATUS. */
1588 		DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1590 			a->service_principal,
1591 			error_message(ret) ));
1593 		data_blob_free(&tkt);
1594 		prs_mem_free(auth_data);
1595 		return NT_STATUS_INVALID_PARAMETER;
1598 	/* wrap that up in a nice GSS-API wrapping */
1599 	tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
	/* The raw ticket is no longer needed once wrapped. */
1601 	data_blob_free(&tkt);
1603 	/* Auth len in the rpc header doesn't include auth_header. */
1604 	if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1605 		data_blob_free(&tkt_wrapped);
1606 		prs_mem_free(auth_data);
1607 		return NT_STATUS_NO_MEMORY;
1610 	DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1611 	dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1613 	data_blob_free(&tkt_wrapped);
1614 	return NT_STATUS_OK;
	/* NOTE(review): presumably the non-HAVE_KRB5 build path. */
1616 	return NT_STATUS_INVALID_PARAMETER;
1620 /*******************************************************************
1621 Creates SPNEGO NTLMSSP auth bind.
1622 ********************************************************************/
/*
 * Build the SPNEGO-wrapped NTLMSSP negotiate token for a bind
 * request and marshall it into auth_data.  Expects the first
 * ntlmssp_update() to return MORE_PROCESSING_REQUIRED (the
 * handshake continues on the bind-ack).  Frees auth_data on error.
 */
1624 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1625 					enum pipe_auth_level auth_level,
1626 					RPC_HDR_AUTH *pauth_out,
1627 					prs_struct *auth_data)
1630 	DATA_BLOB null_blob = data_blob_null;
1631 	DATA_BLOB request = data_blob_null;
1632 	DATA_BLOB spnego_msg = data_blob_null;
1634 	/* We may change the pad length before marshalling. */
1635 	init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1637 	DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1638 	nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
	/* Anything other than MORE_PROCESSING_REQUIRED is fatal here. */
1642 	if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1643 		data_blob_free(&request);
1644 		prs_mem_free(auth_data);
1648 	/* Wrap this in SPNEGO. */
1649 	spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1651 	data_blob_free(&request);
1653 	/* Auth len in the rpc header doesn't include auth_header. */
1654 	if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1655 		data_blob_free(&spnego_msg);
1656 		prs_mem_free(auth_data);
1657 		return NT_STATUS_NO_MEMORY;
1660 	DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1661 	dump_data(5, spnego_msg.data, spnego_msg.length);
1663 	data_blob_free(&spnego_msg);
1664 	return NT_STATUS_OK;
1667 /*******************************************************************
1668 Creates NTLMSSP auth bind.
1669 ********************************************************************/
/*
 * Build the raw (non-SPNEGO) NTLMSSP negotiate token for a bind
 * request and marshall it into auth_data.  Same contract as the
 * SPNEGO variant above, minus the negTokenInit wrapping.
 */
1671 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1672 					enum pipe_auth_level auth_level,
1673 					RPC_HDR_AUTH *pauth_out,
1674 					prs_struct *auth_data)
1677 	DATA_BLOB null_blob = data_blob_null;
1678 	DATA_BLOB request = data_blob_null;
1680 	/* We may change the pad length before marshalling. */
1681 	init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1683 	DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1684 	nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
	/* First leg of the handshake must ask for more processing. */
1688 	if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1689 		data_blob_free(&request);
1690 		prs_mem_free(auth_data);
1694 	/* Auth len in the rpc header doesn't include auth_header. */
1695 	if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1696 		data_blob_free(&request);
1697 		prs_mem_free(auth_data);
1698 		return NT_STATUS_NO_MEMORY;
1701 	DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1702 	dump_data(5, request.data, request.length);
1704 	data_blob_free(&request);
1705 	return NT_STATUS_OK;
1708 /*******************************************************************
1709 Creates schannel auth bind.
1710 ********************************************************************/
/*
 * Build the schannel negotiate blob for a bind request: fill an
 * RPC_AUTH_SCHANNEL_NEG from the client's domain (defaulting to
 * lp_workgroup()) and marshall it into auth_data.
 */
1712 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1713 					enum pipe_auth_level auth_level,
1714 					RPC_HDR_AUTH *pauth_out,
1715 					prs_struct *auth_data)
1717 	RPC_AUTH_SCHANNEL_NEG schannel_neg;
1719 	/* We may change the pad length before marshalling. */
1720 	init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1722 	/* Use lp_workgroup() if domain not specified */
1724 	if (!cli->auth->domain || !cli->auth->domain[0]) {
1725 		cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1726 		if (cli->auth->domain == NULL) {
1727 			return NT_STATUS_NO_MEMORY;
1731 	init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1735 	 * Now marshall the data into the auth parse_struct.
1738 	if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1739 				&schannel_neg, auth_data, 0)) {
1740 		DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1741 		prs_mem_free(auth_data);
1742 		return NT_STATUS_NO_MEMORY;
1745 	return NT_STATUS_OK;
1748 /*******************************************************************
1749 Creates the internals of a DCE/RPC bind request or alter context PDU.
1750 ********************************************************************/
/*
 * Marshall a complete bind or alter-context PDU into rpc_out:
 * RPC header, bind-request header (with one presentation context of
 * abstract/transfer syntax), optional 8-byte-alignment padding, the
 * auth header and the auth payload from pauth_info.  pkt_type picks
 * RPC_BIND vs RPC_ALTCONT.  Returns NT_STATUS_NO_MEMORY on any
 * marshalling failure.
 */
1752 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1753 						prs_struct *rpc_out,
1755 						const RPC_IFACE *abstract,
1756 						const RPC_IFACE *transfer,
1757 						RPC_HDR_AUTH *phdr_auth,
1758 						prs_struct *pauth_info)
1762 	RPC_CONTEXT rpc_ctx;
1763 	uint16 auth_len = prs_offset(pauth_info);
1764 	uint8 ss_padding_len = 0;
1765 	uint16 frag_len = 0;
1767 	/* create the RPC context. */
1768 	init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1770 	/* create the bind request RPC_HDR_RB */
1771 	init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1773 	/* Start building the frag length. */
1774 	frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1776 	/* Do we need to pad ? */
	/* Pad the body out to an 8-byte boundary before the auth header. */
1778 		uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1780 			ss_padding_len = 8 - (data_len % 8);
1781 			phdr_auth->auth_pad_len = ss_padding_len;
1783 		frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1786 	/* Create the request RPC_HDR */
1787 	init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1789 	/* Marshall the RPC header */
1790 	if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1791 		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1792 		return NT_STATUS_NO_MEMORY;
1795 	/* Marshall the bind request data */
1796 	if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1797 		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1798 		return NT_STATUS_NO_MEMORY;
1802 	 * Grow the outgoing buffer to store any auth info.
1806 		if (ss_padding_len) {
			/* Zero padding bytes for sign/seal alignment. */
1808 			memset(pad, '\0', 8);
1809 			if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1810 				DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1811 				return NT_STATUS_NO_MEMORY;
1815 		if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1816 			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1817 			return NT_STATUS_NO_MEMORY;
		/* Append the already-marshalled auth payload verbatim. */
1821 		if(!prs_append_prs_data( rpc_out, pauth_info)) {
1822 			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1823 			return NT_STATUS_NO_MEMORY;
1827 	return NT_STATUS_OK;
1830 /*******************************************************************
1831 Creates a DCE/RPC bind request.
1832 ********************************************************************/
/*
 * Create a DCE/RPC bind request PDU in rpc_out.  Dispatches on
 * auth_type to build the matching auth trailer (schannel, NTLMSSP,
 * SPNEGO-NTLMSSP, KRB5, or none), then marshalls the full PDU via
 * create_bind_or_alt_ctx_internal().  auth_info is always freed
 * before returning.
 */
1834 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1835 				prs_struct *rpc_out,
1837 				const RPC_IFACE *abstract,
1838 				const RPC_IFACE *transfer,
1839 				enum pipe_auth_type auth_type,
1840 				enum pipe_auth_level auth_level)
1842 	RPC_HDR_AUTH hdr_auth;
1843 	prs_struct auth_info;
1844 	NTSTATUS ret = NT_STATUS_OK;
1846 	ZERO_STRUCT(hdr_auth);
1847 	if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1848 		return NT_STATUS_NO_MEMORY;
1850 	switch (auth_type) {
1851 		case PIPE_AUTH_TYPE_SCHANNEL:
1852 			ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1853 			if (!NT_STATUS_IS_OK(ret)) {
1854 				prs_mem_free(&auth_info);
1859 		case PIPE_AUTH_TYPE_NTLMSSP:
1860 			ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1861 			if (!NT_STATUS_IS_OK(ret)) {
1862 				prs_mem_free(&auth_info);
1867 		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1868 			ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1869 			if (!NT_STATUS_IS_OK(ret)) {
1870 				prs_mem_free(&auth_info);
1875 		case PIPE_AUTH_TYPE_KRB5:
1876 			ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1877 			if (!NT_STATUS_IS_OK(ret)) {
1878 				prs_mem_free(&auth_info);
		/* Anonymous bind: empty auth trailer. */
1883 		case PIPE_AUTH_TYPE_NONE:
1887 			/* "Can't" happen. */
1888 			return NT_STATUS_INVALID_INFO_CLASS;
1891 	ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1899 	prs_mem_free(&auth_info);
1903 /*******************************************************************
1904 Create and add the NTLMSSP sign/seal auth header and data.
1905 ********************************************************************/
/*
 * Append the NTLMSSP auth footer to an outgoing request PDU:
 * marshall the auth header, then seal (PRIVACY) or sign (INTEGRITY)
 * the data portion in place and append the resulting signature
 * blob (NTLMSSP_SIG_SIZE bytes).  The PDU in outgoing_pdu must
 * already contain header, request header, data and padding.
 */
1907 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1909 					uint32 ss_padding_len,
1910 					prs_struct *outgoing_pdu)
1912 	RPC_HDR_AUTH auth_info;
1914 	DATA_BLOB auth_blob = data_blob_null;
	/* Length of data + padding, i.e. the signed/sealed region. */
1915 	uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1917 	if (!cli->auth->a_u.ntlmssp_state) {
1918 		return NT_STATUS_INVALID_PARAMETER;
1921 	/* Init and marshall the auth header. */
1922 	init_rpc_hdr_auth(&auth_info,
1923 			map_pipe_auth_type_to_rpc_auth_type(
1924 				cli->auth->auth_type),
1925 			cli->auth->auth_level,
1927 			1 /* context id. */);
1929 	if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1930 		DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1931 		data_blob_free(&auth_blob);
1932 		return NT_STATUS_NO_MEMORY;
1935 	switch (cli->auth->auth_level) {
1936 		case PIPE_AUTH_LEVEL_PRIVACY:
1937 			/* Data portion is encrypted. */
1938 			status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1939 					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1941 					(unsigned char *)prs_data_p(outgoing_pdu),
1942 					(size_t)prs_offset(outgoing_pdu),
1944 			if (!NT_STATUS_IS_OK(status)) {
1945 				data_blob_free(&auth_blob);
1950 		case PIPE_AUTH_LEVEL_INTEGRITY:
1951 			/* Data is signed. */
1952 			status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1953 					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1955 					(unsigned char *)prs_data_p(outgoing_pdu),
1956 					(size_t)prs_offset(outgoing_pdu),
1958 			if (!NT_STATUS_IS_OK(status)) {
1959 				data_blob_free(&auth_blob);
			/* Other auth levels are a programming error here. */
1966 			smb_panic("bad auth level");
1968 			return NT_STATUS_INVALID_PARAMETER;
1971 	/* Finally marshall the blob. */
1973 	if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1974 		DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1975 			(unsigned int)NTLMSSP_SIG_SIZE));
1976 		data_blob_free(&auth_blob);
1977 		return NT_STATUS_NO_MEMORY;
1980 	data_blob_free(&auth_blob);
1981 	return NT_STATUS_OK;
1984 /*******************************************************************
1985 Create and add the schannel sign/seal auth header and data.
1986 ********************************************************************/
/*
 * Append the schannel auth footer to an outgoing request PDU:
 * marshall the auth header, schannel-encode (seal or sign) the
 * data portion in place, then marshall the schannel verifier.
 */
1988 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1990 					uint32 ss_padding_len,
1991 					prs_struct *outgoing_pdu)
1993 	RPC_HDR_AUTH auth_info;
1994 	RPC_AUTH_SCHANNEL_CHK verf;
1995 	struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
	/* Start of the data portion (past RPC + response headers). */
1996 	char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1997 	size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
2000 		return NT_STATUS_INVALID_PARAMETER;
2003 	/* Init and marshall the auth header. */
2004 	init_rpc_hdr_auth(&auth_info,
2005 			map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
2006 			cli->auth->auth_level,
2008 			1 /* context id. */);
2010 	if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
2011 		DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
2012 		return NT_STATUS_NO_MEMORY;
2015 	switch (cli->auth->auth_level) {
2016 		case PIPE_AUTH_LEVEL_PRIVACY:
2017 		case PIPE_AUTH_LEVEL_INTEGRITY:
2018 			DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
			/* Encode (and, for PRIVACY, encrypt) in place. */
2021 			schannel_encode(sas,
2022 					cli->auth->auth_level,
2023 					SENDER_IS_INITIATOR,
			/* Other auth levels are a programming error here. */
2033 			smb_panic("bad auth level");
2035 			return NT_STATUS_INVALID_PARAMETER;
2038 	/* Finally marshall the blob. */
2039 	smb_io_rpc_auth_schannel_chk("",
2040 			RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
2045 	return NT_STATUS_OK;
2048 /*******************************************************************
2049 Calculate how much data we're going to send in this packet, also
2050 work out any sign/seal padding length.
2051 ********************************************************************/
/*
 * Work out how many bytes of NDR data fit in the next request
 * fragment, honouring cli->max_xmit_frag and the auth trailer for
 * the negotiated auth level.  Outputs the total fragment length
 * (*p_frag_len), the auth trailer length (*p_auth_len) and any
 * sign/seal alignment padding (*p_ss_padding).  Returns the data
 * byte count for this fragment.
 */
2053 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
2057 					uint32 *p_ss_padding)
2059 	uint32 data_space, data_len;
	/* NOTE(review): randomly halving data_left looks like a
	 * developer/test path forcing multi-fragment sends; the guard
	 * condition is elided from this view. */
2062 	if ((data_left > 0) && (sys_random() % 2)) {
2063 		data_left = MAX(data_left/2, 1);
2067 	switch (cli->auth->auth_level) {
		/* No auth trailer: all remaining space is data. */
2068 		case PIPE_AUTH_LEVEL_NONE:
2069 		case PIPE_AUTH_LEVEL_CONNECT:
2070 			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
2071 			data_len = MIN(data_space, data_left);
2074 			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
2077 		case PIPE_AUTH_LEVEL_INTEGRITY:
2078 		case PIPE_AUTH_LEVEL_PRIVACY:
2079 			/* Treat the same for all authenticated rpc requests. */
2080 			switch(cli->auth->auth_type) {
2081 				case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2082 				case PIPE_AUTH_TYPE_NTLMSSP:
2083 					*p_auth_len = NTLMSSP_SIG_SIZE;
2085 				case PIPE_AUTH_TYPE_SCHANNEL:
2086 					*p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
2089 					smb_panic("bad auth type");
			/* Reserve room for the auth header + trailer. */
2093 			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2094 						RPC_HDR_AUTH_LEN - *p_auth_len;
2096 			data_len = MIN(data_space, data_left);
			/* Pad data to an 8-byte boundary for sign/seal. */
2099 				*p_ss_padding = 8 - (data_len % 8);
2101 			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +		/* Normal headers. */
2102 					data_len + *p_ss_padding + 		/* data plus padding. */
2103 					RPC_HDR_AUTH_LEN + *p_auth_len; 	/* Auth header and auth data. */
2107 			smb_panic("bad auth level");
2113 /*******************************************************************
2115 Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2116 Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2117 and deals with signing/sealing details.
2118 ********************************************************************/
/*
 * State for rpc_api_pipe_req_send/recv: fragments req_data into
 * request PDUs, writes all but the last with rpc_write, sends the
 * last via rpc_api_pipe and collects the reply in reply_pdu.
 */
2120 struct rpc_api_pipe_req_state {
2121 	struct event_context *ev;
2122 	struct rpc_pipe_client *cli;
	/* Full marshalled NDR request (not owned by this state). */
2125 	prs_struct *req_data;
	/* How many bytes of req_data have been fragmented so far. */
2126 	uint32_t req_data_sent;
	/* Scratch buffer for the fragment currently being built. */
2127 	prs_struct outgoing_frag;
	/* Reassembled response PDU. */
2128 	prs_struct reply_pdu;
/*
 * talloc destructor: release the prs buffers owned by the request
 * state.
 */
2131 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2133 	prs_mem_free(&s->outgoing_frag);
2134 	prs_mem_free(&s->reply_pdu);
2138 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2139 static void rpc_api_pipe_req_done(struct async_req *subreq);
2140 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2141 bool *is_last_frag);
/*
 * Start an async RPC call: fragment req_data into request PDUs of
 * at most cli->max_xmit_frag bytes, sign/seal each fragment per the
 * pipe's auth settings, and send them.  The final fragment goes via
 * rpc_api_pipe_send (which also collects the reply); earlier
 * fragments go via rpc_write_send.  Complete with
 * rpc_api_pipe_req_recv().
 */
2143 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2144 					struct event_context *ev,
2145 					struct rpc_pipe_client *cli,
2147 					prs_struct *req_data)
2149 	struct async_req *result, *subreq;
2150 	struct rpc_api_pipe_req_state *state;
2154 	if (!async_req_setup(mem_ctx, &result, &state,
2155 			     struct rpc_api_pipe_req_state)) {
2160 	state->op_num = op_num;
2161 	state->req_data = req_data;
2162 	state->req_data_sent = 0;
2163 	state->call_id = get_rpc_call_id();
	/* Sanity: the server's max fragment must at least hold our
	 * headers plus the largest auth footer. */
2165 	if (cli->max_xmit_frag
2166 	    < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2167 		/* Server is screwed up ! */
2168 		status = NT_STATUS_INVALID_PARAMETER;
2172 	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2174 	if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2176 		status = NT_STATUS_NO_MEMORY;
2180 	talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
	/* Build the first fragment into outgoing_frag. */
2182 	status = prepare_next_frag(state, &is_last_frag);
2183 	if (!NT_STATUS_IS_OK(status)) {
	/* Last (or only) fragment: send and await the reply. */
2188 		subreq = rpc_api_pipe_send(state, ev, state->cli,
2189 					   &state->outgoing_frag,
2191 		if (subreq == NULL) {
2192 			status = NT_STATUS_NO_MEMORY;
2195 		subreq->async.fn = rpc_api_pipe_req_done;
2196 		subreq->async.priv = result;
	/* Intermediate fragment: just write it, more to follow. */
2198 		subreq = rpc_write_send(state, ev, cli,
2199 					prs_data_p(&state->outgoing_frag),
2200 					prs_offset(&state->outgoing_frag));
2201 		if (subreq == NULL) {
2202 			status = NT_STATUS_NO_MEMORY;
2205 		subreq->async.fn = rpc_api_pipe_req_write_done;
2206 		subreq->async.priv = result;
	/* Error path: deliver "status" through the event loop. */
2211 	if (async_post_status(result, ev, status)) {
2214 	TALLOC_FREE(result);
/*
 * Build the next request fragment into state->outgoing_frag: RPC
 * header (FIRST/LAST flags as appropriate), request header, the
 * next slice of req_data, sign/seal padding, and the auth footer
 * for the pipe's auth type.  Advances req_data_sent and reports via
 * *is_last_frag whether this was the final fragment.
 */
2218 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2222 	RPC_HDR_REQ hdr_req;
2223 	uint32_t data_sent_thistime;
2227 	uint32_t ss_padding;
	/* Zero padding source for the sign/seal alignment bytes. */
2229 	char pad[8] = { 0, };
2232 	data_left = prs_offset(state->req_data) - state->req_data_sent;
2234 	data_sent_thistime = calculate_data_len_tosend(
2235 		state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2237 	if (state->req_data_sent == 0) {
2238 		flags = RPC_FLG_FIRST;
2241 	if (data_sent_thistime == data_left) {
2242 		flags |= RPC_FLG_LAST;
	/* Rewind the scratch buffer; it is reused for every fragment. */
2245 	if (!prs_set_offset(&state->outgoing_frag, 0)) {
2246 		return NT_STATUS_NO_MEMORY;
2249 	/* Create and marshall the header and request header. */
2250 	init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2253 	if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2254 		return NT_STATUS_NO_MEMORY;
2257 	/* Create the rpc request RPC_HDR_REQ */
2258 	init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2261 	if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2262 				&state->outgoing_frag, 0)) {
2263 		return NT_STATUS_NO_MEMORY;
2266 	/* Copy in the data, plus any ss padding. */
2267 	if (!prs_append_some_prs_data(&state->outgoing_frag,
2268 				      state->req_data, state->req_data_sent,
2269 				      data_sent_thistime)) {
2270 		return NT_STATUS_NO_MEMORY;
2273 	/* Copy the sign/seal padding data. */
2274 	if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2275 		return NT_STATUS_NO_MEMORY;
2278 	/* Generate any auth sign/seal and add the auth footer. */
2279 	switch (state->cli->auth->auth_type) {
2280 	case PIPE_AUTH_TYPE_NONE:
2281 		status = NT_STATUS_OK;
2283 	case PIPE_AUTH_TYPE_NTLMSSP:
2284 	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2285 		status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2286 						 &state->outgoing_frag);
2288 	case PIPE_AUTH_TYPE_SCHANNEL:
2289 		status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2290 						  &state->outgoing_frag);
2293 		status = NT_STATUS_INVALID_PARAMETER;
2297 	state->req_data_sent += data_sent_thistime;
2298 	*is_last_frag = ((flags & RPC_FLG_LAST) != 0);
/*
 * Callback after writing an intermediate request fragment: build
 * the next fragment and either send it via rpc_api_pipe (last
 * fragment, which also collects the reply) or write it and loop.
 */
2303 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2305 	struct async_req *req = talloc_get_type_abort(
2306 		subreq->async.priv, struct async_req);
2307 	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2308 		req->private_data, struct rpc_api_pipe_req_state);
2312 	status = rpc_write_recv(subreq);
2313 	TALLOC_FREE(subreq);
2314 	if (!NT_STATUS_IS_OK(status)) {
2315 		async_req_error(req, status);
2319 	status = prepare_next_frag(state, &is_last_frag);
2320 	if (!NT_STATUS_IS_OK(status)) {
2321 		async_req_error(req, status);
	/* Last fragment: hand over to rpc_api_pipe for the reply. */
2326 		subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2327 					   &state->outgoing_frag,
2329 		if (async_req_nomem(subreq, req)) {
2332 		subreq->async.fn = rpc_api_pipe_req_done;
2333 		subreq->async.priv = req;
	/* More fragments: write this one and come back here. */
2335 		subreq = rpc_write_send(state, state->ev, state->cli,
2336 					prs_data_p(&state->outgoing_frag),
2337 					prs_offset(&state->outgoing_frag));
2338 		if (async_req_nomem(subreq, req)) {
2341 		subreq->async.fn = rpc_api_pipe_req_write_done;
2342 		subreq->async.priv = req;
/*
 * Final callback: the last fragment was sent and the complete
 * reply has been reassembled — stash it in state->reply_pdu and
 * finish the request.
 */
2346 static void rpc_api_pipe_req_done(struct async_req *subreq)
2348 	struct async_req *req = talloc_get_type_abort(
2349 		subreq->async.priv, struct async_req);
2350 	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2351 		req->private_data, struct rpc_api_pipe_req_state);
2354 	status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2355 	TALLOC_FREE(subreq);
2356 	if (!NT_STATUS_IS_OK(status)) {
2357 		async_req_error(req, status);
2360 	async_req_done(req);
/*
 * Receive side of rpc_api_pipe_req_send(): on success hands the
 * reply PDU to the caller by structure copy (memory re-parented to
 * mem_ctx), then reinitialises state->reply_pdu so the destructor
 * does not free the transferred buffer.
 */
2363 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2364 			       prs_struct *reply_pdu)
2366 	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2367 		req->private_data, struct rpc_api_pipe_req_state);
2370 	if (async_req_is_error(req, &status)) {
2374 	*reply_pdu = state->reply_pdu;
2375 	reply_pdu->mem_ctx = mem_ctx;
2378 	 * Prevent state->req_pdu from being freed in
2379 	 * rpc_api_pipe_req_state_destructor()
2381 	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2383 	return NT_STATUS_OK;
/*
 * Synchronous wrapper around rpc_api_pipe_req_send/recv: spins a
 * private event context on a talloc stackframe until the async
 * request completes, then returns the reply in out_data.
 */
2386 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2388 			prs_struct *in_data,
2389 			prs_struct *out_data)
2391 	TALLOC_CTX *frame = talloc_stackframe();
2392 	struct event_context *ev;
2393 	struct async_req *req;
2394 	NTSTATUS status = NT_STATUS_NO_MEMORY;
2396 	ev = event_context_init(frame);
2401 	req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
	/* Pump the event loop until the async request finishes. */
2406 	while (req->state < ASYNC_REQ_DONE) {
2407 		event_loop_once(ev);
2410 	status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2417 /****************************************************************************
2418 Set the handle state.
2419 ****************************************************************************/
/* Set the named-pipe handle state via a SetNmPipeHandleState transaction
 * (TRANSACT_SETNAMEDPIPEHANDLESTATE) on the SMB connection.
 * NOTE(review): the declarations of 'param' and 'rdata' are outside this
 * view — presumably local buffers; confirm in the full source. */
2421 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2422 const char *pipe_name, uint16 device_state)
2424 bool state_set = False;
2426 uint16 setup[2]; /* only need 2 uint16 setup parameters */
2427 char *rparam = NULL;
2429 uint32 rparam_len, rdata_len;
2431 if (pipe_name == NULL)
2434 DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2435 cli->fnum, pipe_name, device_state));
2437 /* create parameters: device state */
2438 SSVAL(param, 0, device_state);
2440 /* create setup parameters. */
2442 setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
2444 /* send the data on \PIPE\ */
2445 if (cli_api_pipe(cli->cli, "\\PIPE\\",
2446 setup, 2, 0, /* setup, length, max */
2447 param, 2, 0, /* param, length, max */
2448 NULL, 0, 1024, /* data, length, max */
2449 &rparam, &rparam_len, /* return param, length */
2450 &rdata, &rdata_len)) /* return data, length */
2452 DEBUG(5, ("Set Handle state: return OK\n"));
2463 /****************************************************************************
2464 Check the rpc bind acknowledge response.
2465 ****************************************************************************/
/* Validate a BIND-ACK: the transfer syntax must match what we requested and
 * the server must report exactly one successful presentation result. */
2467 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
/* Some servers (ASU bug) return an empty secondary address; tolerate it. */
2469 if ( hdr_ba->addr.len == 0) {
2470 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2473 /* check the transfer syntax */
2474 if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2475 (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2476 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
/* Exactly one context result with result code 0 (acceptance) expected. */
2480 if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2481 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2482 hdr_ba->res.num_results, hdr_ba->res.reason));
2485 DEBUG(5,("check_bind_response: accepted!\n"));
2489 /*******************************************************************
2490 Creates a DCE/RPC bind authentication response.
2491 This is the packet that is sent back to the server once we
2492 have received a BIND-ACK, to finish the third leg of
2493 the authentication handshake.
2494 ********************************************************************/
/* Marshall an RPC_AUTH3 PDU into rpc_out: RPC header + 4 pad bytes +
 * auth header + the caller-supplied auth blob. This is the third leg of
 * an authenticated bind (sent after we processed the BIND-ACK). */
2496 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2498 enum pipe_auth_type auth_type,
2499 enum pipe_auth_level auth_level,
2500 DATA_BLOB *pauth_blob,
2501 prs_struct *rpc_out)
2504 RPC_HDR_AUTH hdr_auth;
2507 /* Create the request RPC_HDR */
2508 init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2509 RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2510 pauth_blob->length );
2513 if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2514 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2515 return NT_STATUS_NO_MEMORY;
2519 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2520 about padding - shouldn't this pad to length 8 ? JRA.
2523 /* 4 bytes padding. */
2524 if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2525 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2526 return NT_STATUS_NO_MEMORY;
2529 /* Create the request RPC_HDR_AUTHA */
2530 init_rpc_hdr_auth(&hdr_auth,
2531 map_pipe_auth_type_to_rpc_auth_type(auth_type),
2534 if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2535 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2536 return NT_STATUS_NO_MEMORY;
2540 * Append the auth data to the outgoing buffer.
2543 if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2544 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2545 return NT_STATUS_NO_MEMORY;
2548 return NT_STATUS_OK;
2551 /*******************************************************************
2552 Creates a DCE/RPC bind alter context authentication request which
2553 may contain a spnego auth blob
2554 ********************************************************************/
/* Marshall an ALTER_CONTEXT request carrying an SPNEGO auth blob into
 * rpc_out, reusing the common bind/alter-context marshalling helper. */
2556 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2557 const RPC_IFACE *abstract,
2558 const RPC_IFACE *transfer,
2559 enum pipe_auth_level auth_level,
2560 const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2561 prs_struct *rpc_out)
2563 RPC_HDR_AUTH hdr_auth;
2564 prs_struct auth_info;
2565 NTSTATUS ret = NT_STATUS_OK;
2567 ZERO_STRUCT(hdr_auth);
2568 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2569 return NT_STATUS_NO_MEMORY;
2571 /* We may change the pad length before marshalling. */
2572 init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2574 if (pauth_blob->length) {
2575 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2576 prs_mem_free(&auth_info);
2577 return NT_STATUS_NO_MEMORY;
/* Delegate header + context marshalling; RPC_ALTCONT selects pkt type. */
2581 ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2588 prs_mem_free(&auth_info);
2592 /****************************************************************************
2594 ****************************************************************************/
/* Per-request state for the async rpc_pipe_bind state machine. */
2596 struct rpc_pipe_bind_state {
2597 struct event_context *ev;
2598 struct rpc_pipe_client *cli;
2600 uint32_t rpc_call_id;   /* call id reused across all legs of the bind */
/* Destructor: release the marshalling buffer owned by the state. */
2603 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2605 prs_mem_free(&state->rpc_out);
/* Forward declarations for the multi-leg (3- and 4-leg) async bind
 * state machine implemented below. */
2609 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2610 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2611 struct rpc_pipe_bind_state *state,
2612 struct rpc_hdr_info *phdr,
2613 prs_struct *reply_pdu);
2614 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2615 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2616 struct rpc_pipe_bind_state *state,
2617 struct rpc_hdr_info *phdr,
2618 prs_struct *reply_pdu);
2619 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
/* Kick off an async bind on an open pipe: marshall the BIND PDU and send
 * it via rpc_api_pipe_send. Takes ownership of 'auth' (talloc_move onto
 * cli). Completion continues in rpc_pipe_bind_step_one_done. */
2621 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2622 struct event_context *ev,
2623 struct rpc_pipe_client *cli,
2624 struct cli_pipe_auth_data *auth)
2626 struct async_req *result, *subreq;
2627 struct rpc_pipe_bind_state *state;
2630 if (!async_req_setup(mem_ctx, &result, &state,
2631 struct rpc_pipe_bind_state)) {
2635 DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2636 rpccli_pipe_txt(debug_ctx(), cli),
2637 (unsigned int)auth->auth_type,
2638 (unsigned int)auth->auth_level ));
2642 state->rpc_call_id = get_rpc_call_id();
2644 prs_init_empty(&state->rpc_out, state, MARSHALL);
2645 talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
/* The pipe client now owns the auth data. */
2647 cli->auth = talloc_move(cli, &auth);
2649 /* Marshall the outgoing data. */
2650 status = create_rpc_bind_req(cli, &state->rpc_out,
2652 &cli->abstract_syntax,
2653 &cli->transfer_syntax,
2654 cli->auth->auth_type,
2655 cli->auth->auth_level);
2657 if (!NT_STATUS_IS_OK(status)) {
2661 subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2663 if (subreq == NULL) {
2664 status = NT_STATUS_NO_MEMORY;
2667 subreq->async.fn = rpc_pipe_bind_step_one_done;
2668 subreq->async.priv = result;
/* Error exit: deliver the failure status through the async machinery. */
2672 if (async_post_status(result, ev, status)) {
2675 TALLOC_FREE(result);
/* Process the BIND-ACK: unmarshall and validate it, record the negotiated
 * fragment sizes, then either complete the bind (no/schannel auth) or
 * launch the extra auth legs (AUTH3 for NTLMSSP, ALTER_CONTEXT for
 * SPNEGO-NTLMSSP). */
2679 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2681 struct async_req *req = talloc_get_type_abort(
2682 subreq->async.priv, struct async_req);
2683 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2684 req->private_data, struct rpc_pipe_bind_state);
2685 prs_struct reply_pdu;
2686 struct rpc_hdr_info hdr;
2687 struct rpc_hdr_ba_info hdr_ba;
2690 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2691 TALLOC_FREE(subreq);
2692 if (!NT_STATUS_IS_OK(status)) {
2693 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2694 rpccli_pipe_txt(debug_ctx(), state->cli),
2695 nt_errstr(status)));
2696 async_req_error(req, status);
2700 /* Unmarshall the RPC header */
2701 if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2702 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2703 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2707 if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2708 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2710 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2714 if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2715 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2716 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
/* Remember the fragment sizes the server negotiated. */
2720 state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2721 state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2724 * For authenticated binds we may need to do 3 or 4 leg binds.
2727 switch(state->cli->auth->auth_type) {
2729 case PIPE_AUTH_TYPE_NONE:
2730 case PIPE_AUTH_TYPE_SCHANNEL:
2731 /* Bind complete. */
2732 async_req_done(req);
2735 case PIPE_AUTH_TYPE_NTLMSSP:
2736 /* Need to send AUTH3 packet - no reply. */
2737 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2739 if (!NT_STATUS_IS_OK(status)) {
2740 async_req_error(req, status);
2744 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2745 /* Need to send alter context request and reply. */
2746 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2748 if (!NT_STATUS_IS_OK(status)) {
2749 async_req_error(req, status);
2753 case PIPE_AUTH_TYPE_KRB5:
2757 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2758 (unsigned int)state->cli->auth->auth_type));
2759 async_req_error(req, NT_STATUS_INTERNAL_ERROR);
/* Third leg of an NTLMSSP bind: extract the server's NTLMSSP challenge
 * from the BIND-ACK auth trailer, run it through ntlmssp_update, and send
 * the resulting AUTH3 PDU (write only — the server does not reply). */
2763 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2764 struct rpc_pipe_bind_state *state,
2765 struct rpc_hdr_info *phdr,
2766 prs_struct *reply_pdu)
2768 DATA_BLOB server_response = data_blob_null;
2769 DATA_BLOB client_reply = data_blob_null;
2770 struct rpc_hdr_auth_info hdr_auth;
2771 struct async_req *subreq;
/* Sanity: there must be an auth trailer that fits inside the fragment. */
2774 if ((phdr->auth_len == 0)
2775 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2776 return NT_STATUS_INVALID_PARAMETER;
/* Seek to the auth header that precedes the auth blob at the PDU tail. */
2779 if (!prs_set_offset(
2781 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2782 return NT_STATUS_INVALID_PARAMETER;
2785 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2786 return NT_STATUS_INVALID_PARAMETER;
2789 /* TODO - check auth_type/auth_level match. */
2791 server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2792 prs_copy_data_out((char *)server_response.data, reply_pdu,
2795 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2796 server_response, &client_reply);
2798 if (!NT_STATUS_IS_OK(status)) {
2799 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2800 "blob failed: %s.\n", nt_errstr(status)));
/* Re-initialize the outgoing buffer for the AUTH3 PDU. */
2804 prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2806 status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2807 state->cli->auth->auth_type,
2808 state->cli->auth->auth_level,
2809 &client_reply, &state->rpc_out);
2810 data_blob_free(&client_reply);
2812 if (!NT_STATUS_IS_OK(status)) {
2816 subreq = rpc_write_send(state, state->ev, state->cli,
2817 prs_data_p(&state->rpc_out),
2818 prs_offset(&state->rpc_out));
2819 if (subreq == NULL) {
2820 return NT_STATUS_NO_MEMORY;
2822 subreq->async.fn = rpc_bind_auth3_write_done;
2823 subreq->async.priv = req;
2824 return NT_STATUS_OK;
/* AUTH3 write completed: the NTLMSSP bind is finished (the server sends
 * no reply to AUTH3), so complete or fail the parent bind request. */
2827 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2829 struct async_req *req = talloc_get_type_abort(
2830 subreq->async.priv, struct async_req);
2833 status = rpc_write_recv(subreq);
2834 TALLOC_FREE(subreq);
2835 if (!NT_STATUS_IS_OK(status)) {
2836 async_req_error(req, status);
2839 async_req_done(req);
/* Third leg of a SPNEGO-NTLMSSP bind: unwrap the SPNEGO challenge from the
 * BIND-ACK auth trailer, feed the inner NTLMSSP token to ntlmssp_update,
 * re-wrap the reply in SPNEGO, and send it as an ALTER_CONTEXT request.
 * The server's ALTER_CONTEXT response is handled in
 * rpc_bind_ntlmssp_api_done (4-leg handshake). */
2842 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2843 struct rpc_pipe_bind_state *state,
2844 struct rpc_hdr_info *phdr,
2845 prs_struct *reply_pdu)
2847 DATA_BLOB server_spnego_response = data_blob_null;
2848 DATA_BLOB server_ntlm_response = data_blob_null;
2849 DATA_BLOB client_reply = data_blob_null;
2850 DATA_BLOB tmp_blob = data_blob_null;
2851 RPC_HDR_AUTH hdr_auth;
2852 struct async_req *subreq;
/* Sanity: the auth trailer must fit inside the fragment. */
2855 if ((phdr->auth_len == 0)
2856 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2857 return NT_STATUS_INVALID_PARAMETER;
2860 /* Process the returned NTLMSSP blob first. */
2861 if (!prs_set_offset(
2863 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2864 return NT_STATUS_INVALID_PARAMETER;
2867 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2868 return NT_STATUS_INVALID_PARAMETER;
2871 server_spnego_response = data_blob(NULL, phdr->auth_len);
2872 prs_copy_data_out((char *)server_spnego_response.data,
2873 reply_pdu, phdr->auth_len);
2876 * The server might give us back two challenges - tmp_blob is for the
2879 if (!spnego_parse_challenge(server_spnego_response,
2880 &server_ntlm_response, &tmp_blob)) {
2881 data_blob_free(&server_spnego_response);
2882 data_blob_free(&server_ntlm_response);
2883 data_blob_free(&tmp_blob);
2884 return NT_STATUS_INVALID_PARAMETER;
2887 /* We're finished with the server spnego response and the tmp_blob. */
2888 data_blob_free(&server_spnego_response);
2889 data_blob_free(&tmp_blob);
2891 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2892 server_ntlm_response, &client_reply);
2894 /* Finished with the server_ntlm response */
2895 data_blob_free(&server_ntlm_response);
2897 if (!NT_STATUS_IS_OK(status)) {
2898 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2899 "using server blob failed.\n"));
2900 data_blob_free(&client_reply);
2904 /* SPNEGO wrap the client reply. */
2905 tmp_blob = spnego_gen_auth(client_reply);
2906 data_blob_free(&client_reply);
2907 client_reply = tmp_blob;
2908 tmp_blob = data_blob_null;
2910 /* Now prepare the alter context pdu. */
2911 prs_init_empty(&state->rpc_out, state, MARSHALL);
2913 status = create_rpc_alter_context(state->rpc_call_id,
2914 &state->cli->abstract_syntax,
2915 &state->cli->transfer_syntax,
2916 state->cli->auth->auth_level,
2919 data_blob_free(&client_reply);
2921 if (!NT_STATUS_IS_OK(status)) {
/* Send the ALTER_CONTEXT and expect an ALTER_CONTEXT response PDU. */
2925 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2926 &state->rpc_out, RPC_ALTCONTRESP);
2927 if (subreq == NULL) {
2928 return NT_STATUS_NO_MEMORY;
2930 subreq->async.fn = rpc_bind_ntlmssp_api_done;
2931 subreq->async.priv = req;
2932 return NT_STATUS_OK;
/* Fourth leg of the SPNEGO-NTLMSSP bind: validate the SPNEGO auth response
 * carried in the ALTER_CONTEXT reply and complete the bind. */
2935 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2937 struct async_req *req = talloc_get_type_abort(
2938 subreq->async.priv, struct async_req);
2939 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2940 req->private_data, struct rpc_pipe_bind_state);
2941 DATA_BLOB server_spnego_response = data_blob_null;
2942 DATA_BLOB tmp_blob = data_blob_null;
2943 prs_struct reply_pdu;
2944 struct rpc_hdr_info hdr;
2945 struct rpc_hdr_auth_info hdr_auth;
2948 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2949 TALLOC_FREE(subreq);
2950 if (!NT_STATUS_IS_OK(status)) {
2951 async_req_error(req, status);
2955 /* Get the auth blob from the reply. */
2956 if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
2957 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2958 "unmarshall RPC_HDR.\n"));
2959 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
/* Seek to the auth trailer at the end of the fragment. */
2963 if (!prs_set_offset(
2965 hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2966 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2970 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2971 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2975 server_spnego_response = data_blob(NULL, hdr.auth_len);
2976 prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2979 /* Check we got a valid auth response. */
2980 if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2981 OID_NTLMSSP, &tmp_blob)) {
2982 data_blob_free(&server_spnego_response);
2983 data_blob_free(&tmp_blob);
2984 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2988 data_blob_free(&server_spnego_response);
2989 data_blob_free(&tmp_blob);
2991 DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2992 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2993 async_req_done(req);
/* Receive function for rpc_pipe_bind_send: returns only the final status. */
2996 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2998 return async_req_simple_recv(req);
/* Synchronous wrapper around rpc_pipe_bind_send/recv: binds the pipe with
 * the given auth data, spinning a private event loop until completion. */
3001 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
3002 struct cli_pipe_auth_data *auth)
3004 TALLOC_CTX *frame = talloc_stackframe();
3005 struct event_context *ev;
3006 struct async_req *req;
3007 NTSTATUS status = NT_STATUS_NO_MEMORY;
3009 ev = event_context_init(frame);
3014 req = rpc_pipe_bind_send(frame, ev, cli, auth);
/* Drive the event loop until the async bind completes. */
3019 while (req->state < ASYNC_REQ_DONE) {
3020 event_loop_once(ev);
3023 status = rpc_pipe_bind_recv(req);
/* Set the SMB transport timeout for this pipe; returns the previous value.
 * NOTE(review): unconditionally dereferences trans.np — presumably only
 * valid for NCACN_NP pipes; confirm callers. */
3029 unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli,
3030 unsigned int timeout)
3032 return cli_set_timeout(cli->trans.np.cli, timeout);
/* Fetch the NT password hash associated with this pipe: from the NTLMSSP
 * auth state when NTLMSSP is in use, otherwise derived from the SMB
 * connection's password for named-pipe transports. */
3035 bool rpccli_get_pwd_hash(struct rpc_pipe_client *cli, uint8_t nt_hash[16])
3037 if ((cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
3038 || (cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
3039 memcpy(nt_hash, cli->auth->a_u.ntlmssp_state->nt_hash, 16);
3043 if (cli->transport_type == NCACN_NP) {
3044 E_md4hash(cli->trans.np.cli->pwd.password, nt_hash);
/* Return the underlying SMB connection for a named-pipe transport,
 * or (per the visible control flow) fall through for other transports. */
3051 struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
3053 if (p->transport_type == NCACN_NP) {
3054 return p->trans.np.cli;
/* Talloc destructor for named-pipe clients: close the pipe fnum on the SMB
 * connection and unlink this pipe from the connection's pipe list. */
3059 static int rpc_pipe_destructor(struct rpc_pipe_client *p)
3061 if (p->transport_type == NCACN_NP) {
3063 ret = cli_close(p->trans.np.cli, p->trans.np.fnum);
3065 DEBUG(1, ("rpc_pipe_destructor: cli_close failed on "
3066 "pipe %s. Error was %s\n",
3067 rpccli_pipe_txt(debug_ctx(), p),
3068 cli_errstr(p->trans.np.cli)));
3071 DEBUG(10, ("rpc_pipe_destructor: closed %s\n",
3072 rpccli_pipe_txt(debug_ctx(), p)));
3074 DLIST_REMOVE(p->trans.np.cli->pipe_list, p);
3075 return ret ? -1 : 0;
/* Allocate cli_pipe_auth_data for an anonymous (no-auth) bind.
 * On success *presult receives the new structure (set outside this view). */
3081 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
3082 struct cli_pipe_auth_data **presult)
3084 struct cli_pipe_auth_data *result;
3086 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3087 if (result == NULL) {
3088 return NT_STATUS_NO_MEMORY;
3091 result->auth_type = PIPE_AUTH_TYPE_NONE;
3092 result->auth_level = PIPE_AUTH_LEVEL_NONE;
/* Anonymous: empty user and domain. */
3094 result->user_name = talloc_strdup(result, "");
3095 result->domain = talloc_strdup(result, "");
3096 if ((result->user_name == NULL) || (result->domain == NULL)) {
3097 TALLOC_FREE(result);
3098 return NT_STATUS_NO_MEMORY;
3102 return NT_STATUS_OK;
/* Talloc destructor: tear down the NTLMSSP state held in the auth union. */
3105 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3107 ntlmssp_end(&auth->a_u.ntlmssp_state);
/* Allocate cli_pipe_auth_data for an NTLMSSP (or SPNEGO-NTLMSSP) bind:
 * start an NTLMSSP client context with the given credentials and set the
 * sign/seal negotiate flags according to the requested auth level. */
3111 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3112 enum pipe_auth_type auth_type,
3113 enum pipe_auth_level auth_level,
3115 const char *username,
3116 const char *password,
3117 struct cli_pipe_auth_data **presult)
3119 struct cli_pipe_auth_data *result;
3122 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3123 if (result == NULL) {
3124 return NT_STATUS_NO_MEMORY;
3127 result->auth_type = auth_type;
3128 result->auth_level = auth_level;
3130 result->user_name = talloc_strdup(result, username);
3131 result->domain = talloc_strdup(result, domain);
3132 if ((result->user_name == NULL) || (result->domain == NULL)) {
3133 status = NT_STATUS_NO_MEMORY;
3137 status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3138 if (!NT_STATUS_IS_OK(status)) {
/* Ensure ntlmssp_end() runs when the auth data is freed. */
3142 talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3144 status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3145 if (!NT_STATUS_IS_OK(status)) {
3149 status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3150 if (!NT_STATUS_IS_OK(status)) {
3154 status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3155 if (!NT_STATUS_IS_OK(status)) {
3160 * Turn off sign+seal to allow selected auth level to turn it back on.
3162 result->a_u.ntlmssp_state->neg_flags &=
3163 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3165 if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3166 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3167 } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3168 result->a_u.ntlmssp_state->neg_flags
3169 |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3173 return NT_STATUS_OK;
/* Common error exit: free the partially-built auth data. */
3176 TALLOC_FREE(result);
/* Allocate cli_pipe_auth_data for a schannel bind, copying the 16-byte
 * machine session key and resetting the sequence number. */
3180 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3181 enum pipe_auth_level auth_level,
3182 const uint8_t sess_key[16],
3183 struct cli_pipe_auth_data **presult)
3185 struct cli_pipe_auth_data *result;
3187 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3188 if (result == NULL) {
3189 return NT_STATUS_NO_MEMORY;
3192 result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3193 result->auth_level = auth_level;
3195 result->user_name = talloc_strdup(result, "");
3196 result->domain = talloc_strdup(result, domain);
3197 if ((result->user_name == NULL) || (result->domain == NULL)) {
3201 result->a_u.schannel_auth = talloc(result,
3202 struct schannel_auth_struct);
3203 if (result->a_u.schannel_auth == NULL) {
3207 memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3208 sizeof(result->a_u.schannel_auth->sess_key));
3209 result->a_u.schannel_auth->seq_num = 0;
3212 return NT_STATUS_OK;
/* Error exit: free the partially-built auth data. */
3215 TALLOC_FREE(result);
3216 return NT_STATUS_NO_MEMORY;
/* Talloc destructor: release the Kerberos session key blob. */
3220 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3222 data_blob_free(&auth->session_key);
/* Allocate cli_pipe_auth_data for a Kerberos bind. If username/password are
 * given, kinit first; otherwise use the existing credential cache. The
 * final return NT_STATUS_NOT_SUPPORTED is presumably the non-Kerberos
 * build path (the #ifdef is outside this view — confirm in full source). */
3227 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3228 enum pipe_auth_level auth_level,
3229 const char *service_princ,
3230 const char *username,
3231 const char *password,
3232 struct cli_pipe_auth_data **presult)
3235 struct cli_pipe_auth_data *result;
3237 if ((username != NULL) && (password != NULL)) {
3238 int ret = kerberos_kinit_password(username, password, 0, NULL);
3240 return NT_STATUS_ACCESS_DENIED;
3244 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3245 if (result == NULL) {
3246 return NT_STATUS_NO_MEMORY;
3249 result->auth_type = PIPE_AUTH_TYPE_KRB5;
3250 result->auth_level = auth_level;
3253 * Username / domain need fixing!
3255 result->user_name = talloc_strdup(result, "");
3256 result->domain = talloc_strdup(result, "");
3257 if ((result->user_name == NULL) || (result->domain == NULL)) {
3261 result->a_u.kerberos_auth = TALLOC_ZERO_P(
3262 result, struct kerberos_auth_struct);
3263 if (result->a_u.kerberos_auth == NULL) {
3266 talloc_set_destructor(result->a_u.kerberos_auth,
3267 cli_auth_kerberos_data_destructor);
3269 result->a_u.kerberos_auth->service_principal = talloc_strdup(
3270 result, service_princ);
3271 if (result->a_u.kerberos_auth->service_principal == NULL) {
3276 return NT_STATUS_OK;
/* Error exit: free the partially-built auth data. */
3279 TALLOC_FREE(result);
3280 return NT_STATUS_NO_MEMORY;
3282 return NT_STATUS_NOT_SUPPORTED;
/* Talloc destructor for socket-based transports: close the fd. */
3286 static int rpc_pipe_sock_destructor(struct rpc_pipe_client *p)
3288 close(p->trans.sock.fd);
3293 * Create an rpc pipe client struct, connecting to a tcp port.
/* Create an rpc_pipe_client connected over ncacn_ip_tcp to host:port,
 * resolving the host name and opening the socket (60s timeout). */
3295 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3297 const struct ndr_syntax_id *abstract_syntax,
3298 struct rpc_pipe_client **presult)
3300 struct rpc_pipe_client *result;
3301 struct sockaddr_storage addr;
3304 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3305 if (result == NULL) {
3306 return NT_STATUS_NO_MEMORY;
3309 result->transport_type = NCACN_IP_TCP;
3311 result->abstract_syntax = *abstract_syntax;
3312 result->transfer_syntax = ndr_transfer_syntax;
3313 result->dispatch = cli_do_rpc_ndr;
3315 result->desthost = talloc_strdup(result, host);
3316 result->srv_name_slash = talloc_asprintf_strupper_m(
3317 result, "\\\\%s", result->desthost);
3318 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3319 status = NT_STATUS_NO_MEMORY;
3323 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3324 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3326 if (!resolve_name(host, &addr, 0)) {
3327 status = NT_STATUS_NOT_FOUND;
3331 status = open_socket_out(&addr, port, 60, &result->trans.sock.fd);
3332 if (!NT_STATUS_IS_OK(status)) {
/* From here on a talloc_free of result also closes the socket. */
3336 talloc_set_destructor(result, rpc_pipe_sock_destructor);
3339 return NT_STATUS_OK;
/* Error exit: free the partially-built client. */
3342 TALLOC_FREE(result);
3347 * Determine the tcp port on which a dcerpc interface is listening
3348 * for the ncacn_ip_tcp transport via the endpoint mapper of the
/* Ask the endpoint mapper on 'host' (TCP port 135) which TCP port serves
 * the given interface: open an anonymous epmapper connection, build a
 * protocol tower for the interface, call epm_Map, and extract the port
 * from the single returned tower. */
3351 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3352 const struct ndr_syntax_id *abstract_syntax,
3356 struct rpc_pipe_client *epm_pipe = NULL;
3357 struct cli_pipe_auth_data *auth = NULL;
3358 struct dcerpc_binding *map_binding = NULL;
3359 struct dcerpc_binding *res_binding = NULL;
3360 struct epm_twr_t *map_tower = NULL;
3361 struct epm_twr_t *res_towers = NULL;
3362 struct policy_handle *entry_handle = NULL;
3363 uint32_t num_towers = 0;
3364 uint32_t max_towers = 1;
3365 struct epm_twr_p_t towers;
3366 TALLOC_CTX *tmp_ctx = talloc_stackframe();
3368 if (pport == NULL) {
3369 status = NT_STATUS_INVALID_PARAMETER;
3373 /* open the connection to the endpoint mapper */
3374 status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3375 &ndr_table_epmapper.syntax_id,
3378 if (!NT_STATUS_IS_OK(status)) {
3382 status = rpccli_anon_bind_data(tmp_ctx, &auth);
3383 if (!NT_STATUS_IS_OK(status)) {
3387 status = rpc_pipe_bind(epm_pipe, auth);
3388 if (!NT_STATUS_IS_OK(status)) {
3392 /* create tower for asking the epmapper */
3394 map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3395 if (map_binding == NULL) {
3396 status = NT_STATUS_NO_MEMORY;
3400 map_binding->transport = NCACN_IP_TCP;
3401 map_binding->object = *abstract_syntax;
3402 map_binding->host = host; /* needed? */
3403 map_binding->endpoint = "0"; /* correct? needed? */
3405 map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3406 if (map_tower == NULL) {
3407 status = NT_STATUS_NO_MEMORY;
3411 status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3412 &(map_tower->tower));
3413 if (!NT_STATUS_IS_OK(status)) {
3417 /* allocate further parameters for the epm_Map call */
3419 res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3420 if (res_towers == NULL) {
3421 status = NT_STATUS_NO_MEMORY;
3424 towers.twr = res_towers;
3426 entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3427 if (entry_handle == NULL) {
3428 status = NT_STATUS_NO_MEMORY;
3432 /* ask the endpoint mapper for the port */
3434 status = rpccli_epm_Map(epm_pipe,
3436 CONST_DISCARD(struct GUID *,
3437 &(abstract_syntax->uuid)),
3444 if (!NT_STATUS_IS_OK(status)) {
3448 if (num_towers != 1) {
3449 status = NT_STATUS_UNSUCCESSFUL;
3453 /* extract the port from the answer */
3455 status = dcerpc_binding_from_tower(tmp_ctx,
3456 &(towers.twr->tower),
3458 if (!NT_STATUS_IS_OK(status)) {
3462 /* are further checks here necessary? */
3463 if (res_binding->transport != NCACN_IP_TCP) {
3464 status = NT_STATUS_UNSUCCESSFUL;
/* The endpoint string carries the port number in decimal. */
3468 *pport = (uint16_t)atoi(res_binding->endpoint);
/* Common exit: everything was allocated on tmp_ctx. */
3471 TALLOC_FREE(tmp_ctx);
3476 * Create a rpc pipe client struct, connecting to a host via tcp.
3477 * The port is determined by asking the endpoint mapper on the given
/* Connect to an interface over ncacn_ip_tcp, first asking the host's
 * endpoint mapper which port to use. */
3480 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3481 const struct ndr_syntax_id *abstract_syntax,
3482 struct rpc_pipe_client **presult)
3489 status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3490 if (!NT_STATUS_IS_OK(status)) {
3494 status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3495 abstract_syntax, presult);
3501 /********************************************************************
3502 Create a rpc pipe client struct, connecting to a unix domain socket
3503 ********************************************************************/
/* Create an rpc_pipe_client connected to a local unix domain socket
 * (ncalrpc-style transport). */
3504 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3505 const struct ndr_syntax_id *abstract_syntax,
3506 struct rpc_pipe_client **presult)
3508 struct rpc_pipe_client *result;
3509 struct sockaddr_un addr;
3512 result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3513 if (result == NULL) {
3514 return NT_STATUS_NO_MEMORY;
3517 result->transport_type = NCACN_UNIX_STREAM;
3519 result->abstract_syntax = *abstract_syntax;
3520 result->transfer_syntax = ndr_transfer_syntax;
3521 result->dispatch = cli_do_rpc_ndr;
3523 result->desthost = talloc_get_myname(result);
3524 result->srv_name_slash = talloc_asprintf_strupper_m(
3525 result, "\\\\%s", result->desthost);
3526 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3527 status = NT_STATUS_NO_MEMORY;
3531 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3532 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3534 result->trans.sock.fd = socket(AF_UNIX, SOCK_STREAM, 0);
3535 if (result->trans.sock.fd == -1) {
3536 status = map_nt_error_from_unix(errno);
3540 talloc_set_destructor(result, rpc_pipe_sock_destructor);
3543 addr.sun_family = AF_UNIX;
/* NOTE(review): strncpy does not guarantee NUL-termination if socket_path
 * fills sun_path exactly — confirm callers pass short paths. */
3544 strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3546 if (sys_connect(result->trans.sock.fd,
3547 (struct sockaddr *)&addr) == -1) {
3548 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
/* NOTE(review): this path closes the fd and returns without freeing
 * 'result'; when result is later freed its destructor will close the
 * (possibly reused) fd again — looks like a leak/double-close; verify. */
3550 close(result->trans.sock.fd);
3551 return map_nt_error_from_unix(errno);
3555 return NT_STATUS_OK;
/* Error exit: free the partially-built client. */
3558 TALLOC_FREE(result);
3563 /****************************************************************************
3564 Open a named pipe over SMB to a remote server.
3566 * CAVEAT CALLER OF THIS FUNCTION:
3567 * The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3568 * so be sure that this function is called AFTER any structure (vs pointer)
3569 * assignment of the cli. In particular, libsmbclient does structure
3570 * assignments of cli, which invalidates the data in the returned
3571 * rpc_pipe_client if this function is called before the structure assignment
3574 ****************************************************************************/
/* Open the named pipe for the given interface on an existing SMB
 * connection (NT create on \PIPE\<name>) and link the new client into the
 * connection's pipe list. See the CAVEAT comment above about cli being
 * captured by pointer. */
3576 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3577 const struct ndr_syntax_id *abstract_syntax,
3578 struct rpc_pipe_client **presult)
3580 struct rpc_pipe_client *result;
3583 /* sanity check to protect against crashes */
3586 return NT_STATUS_INVALID_HANDLE;
3589 result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3590 if (result == NULL) {
3591 return NT_STATUS_NO_MEMORY;
3594 result->transport_type = NCACN_NP;
3596 result->trans.np.pipe_name = cli_get_pipe_name_from_iface(
3597 result, cli, abstract_syntax);
3598 if (result->trans.np.pipe_name == NULL) {
3599 DEBUG(1, ("Could not find pipe for interface\n"));
3600 TALLOC_FREE(result);
3601 return NT_STATUS_INVALID_PARAMETER;
3604 result->trans.np.cli = cli;
3605 result->abstract_syntax = *abstract_syntax;
3606 result->transfer_syntax = ndr_transfer_syntax;
3607 result->dispatch = cli_do_rpc_ndr;
3608 result->desthost = talloc_strdup(result, cli->desthost);
3609 result->srv_name_slash = talloc_asprintf_strupper_m(
3610 result, "\\\\%s", result->desthost);
3612 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3613 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3615 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3616 TALLOC_FREE(result);
3617 return NT_STATUS_NO_MEMORY;
/* Open the pipe itself on the SMB connection. */
3620 fnum = cli_nt_create(cli, result->trans.np.pipe_name,
3621 DESIRED_ACCESS_PIPE);
3623 DEBUG(3,("rpc_pipe_open_np: cli_nt_create failed on pipe %s "
3624 "to machine %s. Error was %s\n",
3625 result->trans.np.pipe_name, cli->desthost,
3627 TALLOC_FREE(result);
3628 return cli_get_nt_error(cli);
3631 result->trans.np.fnum = fnum;
/* Track the pipe on the SMB connection; destructor closes and unlinks. */
3633 DLIST_ADD(cli->pipe_list, result);
3634 talloc_set_destructor(result, rpc_pipe_destructor);
3637 return NT_STATUS_OK;
3640 /****************************************************************************
3641 Open a pipe to a remote server.
3642 ****************************************************************************/
/* Open a pipe to the server: drsuapi goes over ncacn_ip_tcp (special-cased
 * here), everything else over a named pipe on the SMB connection. */
3644 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3645 const struct ndr_syntax_id *interface,
3646 struct rpc_pipe_client **presult)
3648 if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3650 * We should have a better way to figure out this drsuapi
3653 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3657 return rpc_pipe_open_np(cli, interface, presult);
3660 /****************************************************************************
3661 Open a named pipe to an SMB server and bind anonymously.
3662 ****************************************************************************/
3664 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3665 const struct ndr_syntax_id *interface,
3666 struct rpc_pipe_client **presult)
3668 struct rpc_pipe_client *result;
3669 struct cli_pipe_auth_data *auth;
3672 status = cli_rpc_pipe_open(cli, interface, &result);
3673 if (!NT_STATUS_IS_OK(status)) {
3677 status = rpccli_anon_bind_data(result, &auth);
3678 if (!NT_STATUS_IS_OK(status)) {
3679 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3680 nt_errstr(status)));
3681 TALLOC_FREE(result);
3686 * This is a bit of an abstraction violation due to the fact that an
3687 * anonymous bind on an authenticated SMB inherits the user/domain
3688 * from the enclosing SMB creds
3691 TALLOC_FREE(auth->user_name);
3692 TALLOC_FREE(auth->domain);
3694 auth->user_name = talloc_strdup(auth, cli->user_name);
3695 auth->domain = talloc_strdup(auth, cli->domain);
3696 auth->user_session_key = data_blob_talloc(auth,
3697 cli->user_session_key.data,
3698 cli->user_session_key.length);
3700 if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3701 TALLOC_FREE(result);
3702 return NT_STATUS_NO_MEMORY;
3705 status = rpc_pipe_bind(result, auth);
3706 if (!NT_STATUS_IS_OK(status)) {
3708 if (ndr_syntax_id_equal(interface,
3709 &ndr_table_dssetup.syntax_id)) {
3710 /* non AD domains just don't have this pipe, avoid
3711 * level 0 statement in that case - gd */
3714 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3715 "%s failed with error %s\n",
3716 cli_get_pipe_name_from_iface(debug_ctx(), cli,
3718 nt_errstr(status) ));
3719 TALLOC_FREE(result);
3723 DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3724 "%s and bound anonymously.\n", result->trans.np.pipe_name,
3728 return NT_STATUS_OK;
3731 /****************************************************************************
3732 Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3733 ****************************************************************************/
3735 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3736 const struct ndr_syntax_id *interface,
3737 enum pipe_auth_type auth_type,
3738 enum pipe_auth_level auth_level,
3740 const char *username,
3741 const char *password,
3742 struct rpc_pipe_client **presult)
3744 struct rpc_pipe_client *result;
3745 struct cli_pipe_auth_data *auth;
3748 status = cli_rpc_pipe_open(cli, interface, &result);
3749 if (!NT_STATUS_IS_OK(status)) {
3753 status = rpccli_ntlmssp_bind_data(
3754 result, auth_type, auth_level, domain, username,
3755 cli->pwd.null_pwd ? NULL : password, &auth);
3756 if (!NT_STATUS_IS_OK(status)) {
3757 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3758 nt_errstr(status)));
3762 status = rpc_pipe_bind(result, auth);
3763 if (!NT_STATUS_IS_OK(status)) {
3764 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3765 nt_errstr(status) ));
3769 DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3770 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3771 result->trans.np.pipe_name, cli->desthost,
3772 domain, username ));
3775 return NT_STATUS_OK;
3779 TALLOC_FREE(result);
3783 /****************************************************************************
3785 Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3786 ****************************************************************************/
3788 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3789 const struct ndr_syntax_id *interface,
3790 enum pipe_auth_level auth_level,
3792 const char *username,
3793 const char *password,
3794 struct rpc_pipe_client **presult)
3796 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3798 PIPE_AUTH_TYPE_NTLMSSP,
3806 /****************************************************************************
3808 Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3809 ****************************************************************************/
3811 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3812 const struct ndr_syntax_id *interface,
3813 enum pipe_auth_level auth_level,
3815 const char *username,
3816 const char *password,
3817 struct rpc_pipe_client **presult)
3819 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3821 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3829 /****************************************************************************
Get the schannel session key out of an already opened netlogon pipe.
3831 ****************************************************************************/
3832 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3833 struct cli_state *cli,
3837 uint32 sec_chan_type = 0;
3838 unsigned char machine_pwd[16];
3839 const char *machine_account;
3842 /* Get the machine account credentials from secrets.tdb. */
3843 if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3846 DEBUG(0, ("get_schannel_session_key: could not fetch "
3847 "trust account password for domain '%s'\n",
3849 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3852 status = rpccli_netlogon_setup_creds(netlogon_pipe,
3853 cli->desthost, /* server name */
3854 domain, /* domain */
3855 global_myname(), /* client name */
3856 machine_account, /* machine account name */
3861 if (!NT_STATUS_IS_OK(status)) {
3862 DEBUG(3, ("get_schannel_session_key_common: "
3863 "rpccli_netlogon_setup_creds failed with result %s "
3864 "to server %s, domain %s, machine account %s.\n",
3865 nt_errstr(status), cli->desthost, domain,
3870 if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3871 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3873 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3876 return NT_STATUS_OK;;
3879 /****************************************************************************
3880 Open a netlogon pipe and get the schannel session key.
3881 Now exposed to external callers.
3882 ****************************************************************************/
3885 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3888 struct rpc_pipe_client **presult)
3890 struct rpc_pipe_client *netlogon_pipe = NULL;
3893 status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3895 if (!NT_STATUS_IS_OK(status)) {
3899 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3901 if (!NT_STATUS_IS_OK(status)) {
3902 TALLOC_FREE(netlogon_pipe);
3906 *presult = netlogon_pipe;
3907 return NT_STATUS_OK;
3910 /****************************************************************************
3912 Open a named pipe to an SMB server and bind using schannel (bind type 68)
3913 using session_key. sign and seal.
3914 ****************************************************************************/
3916 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3917 const struct ndr_syntax_id *interface,
3918 enum pipe_auth_level auth_level,
3920 const struct dcinfo *pdc,
3921 struct rpc_pipe_client **presult)
3923 struct rpc_pipe_client *result;
3924 struct cli_pipe_auth_data *auth;
3927 status = cli_rpc_pipe_open(cli, interface, &result);
3928 if (!NT_STATUS_IS_OK(status)) {
3932 status = rpccli_schannel_bind_data(result, domain, auth_level,
3933 pdc->sess_key, &auth);
3934 if (!NT_STATUS_IS_OK(status)) {
3935 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3936 nt_errstr(status)));
3937 TALLOC_FREE(result);
3941 status = rpc_pipe_bind(result, auth);
3942 if (!NT_STATUS_IS_OK(status)) {
3943 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3944 "cli_rpc_pipe_bind failed with error %s\n",
3945 nt_errstr(status) ));
3946 TALLOC_FREE(result);
3951 * The credentials on a new netlogon pipe are the ones we are passed
3952 * in - copy them over.
3954 result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3955 if (result->dc == NULL) {
3956 DEBUG(0, ("talloc failed\n"));
3957 TALLOC_FREE(result);
3958 return NT_STATUS_NO_MEMORY;
3961 DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3963 "and bound using schannel.\n",
3964 result->trans.np.pipe_name, cli->desthost, domain ));
3967 return NT_STATUS_OK;
3970 /****************************************************************************
3971 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3972 Fetch the session key ourselves using a temporary netlogon pipe. This
3973 version uses an ntlmssp auth bound netlogon pipe to get the key.
3974 ****************************************************************************/
3976 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3978 const char *username,
3979 const char *password,
3981 struct rpc_pipe_client **presult)
3983 struct rpc_pipe_client *netlogon_pipe = NULL;
3986 status = cli_rpc_pipe_open_spnego_ntlmssp(
3987 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3988 domain, username, password, &netlogon_pipe);
3989 if (!NT_STATUS_IS_OK(status)) {
3993 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3995 if (!NT_STATUS_IS_OK(status)) {
3996 TALLOC_FREE(netlogon_pipe);
4000 *presult = netlogon_pipe;
4001 return NT_STATUS_OK;
4004 /****************************************************************************
4005 Open a named pipe to an SMB server and bind using schannel (bind type 68).
4006 Fetch the session key ourselves using a temporary netlogon pipe. This version
4007 uses an ntlmssp bind to get the session key.
4008 ****************************************************************************/
4010 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
4011 const struct ndr_syntax_id *interface,
4012 enum pipe_auth_level auth_level,
4014 const char *username,
4015 const char *password,
4016 struct rpc_pipe_client **presult)
4018 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4019 struct rpc_pipe_client *netlogon_pipe = NULL;
4020 struct rpc_pipe_client *result = NULL;
4023 status = get_schannel_session_key_auth_ntlmssp(
4024 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4025 if (!NT_STATUS_IS_OK(status)) {
4026 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4027 "key from server %s for domain %s.\n",
4028 cli->desthost, domain ));
4032 status = cli_rpc_pipe_open_schannel_with_key(
4033 cli, interface, auth_level, domain, netlogon_pipe->dc,
4036 /* Now we've bound using the session key we can close the netlog pipe. */
4037 TALLOC_FREE(netlogon_pipe);
4039 if (NT_STATUS_IS_OK(status)) {
4045 /****************************************************************************
4046 Open a named pipe to an SMB server and bind using schannel (bind type 68).
4047 Fetch the session key ourselves using a temporary netlogon pipe.
4048 ****************************************************************************/
4050 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4051 const struct ndr_syntax_id *interface,
4052 enum pipe_auth_level auth_level,
4054 struct rpc_pipe_client **presult)
4056 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4057 struct rpc_pipe_client *netlogon_pipe = NULL;
4058 struct rpc_pipe_client *result = NULL;
4061 status = get_schannel_session_key(cli, domain, &neg_flags,
4063 if (!NT_STATUS_IS_OK(status)) {
4064 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4065 "key from server %s for domain %s.\n",
4066 cli->desthost, domain ));
4070 status = cli_rpc_pipe_open_schannel_with_key(
4071 cli, interface, auth_level, domain, netlogon_pipe->dc,
4074 /* Now we've bound using the session key we can close the netlog pipe. */
4075 TALLOC_FREE(netlogon_pipe);
4077 if (NT_STATUS_IS_OK(status)) {
4081 return NT_STATUS_OK;
4084 /****************************************************************************
4085 Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4086 The idea is this can be called with service_princ, username and password all
4087 NULL so long as the caller has a TGT.
4088 ****************************************************************************/
4090 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4091 const struct ndr_syntax_id *interface,
4092 enum pipe_auth_level auth_level,
4093 const char *service_princ,
4094 const char *username,
4095 const char *password,
4096 struct rpc_pipe_client **presult)
4099 struct rpc_pipe_client *result;
4100 struct cli_pipe_auth_data *auth;
4103 status = cli_rpc_pipe_open(cli, interface, &result);
4104 if (!NT_STATUS_IS_OK(status)) {
4108 status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4109 username, password, &auth);
4110 if (!NT_STATUS_IS_OK(status)) {
4111 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4112 nt_errstr(status)));
4113 TALLOC_FREE(result);
4117 status = rpc_pipe_bind(result, auth);
4118 if (!NT_STATUS_IS_OK(status)) {
4119 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
4120 "with error %s\n", nt_errstr(status)));
4121 TALLOC_FREE(result);
4126 return NT_STATUS_OK;
4128 DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4129 return NT_STATUS_NOT_IMPLEMENTED;
4133 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4134 struct rpc_pipe_client *cli,
4135 DATA_BLOB *session_key)
4137 if (!session_key || !cli) {
4138 return NT_STATUS_INVALID_PARAMETER;
4142 return NT_STATUS_INVALID_PARAMETER;
4145 switch (cli->auth->auth_type) {
4146 case PIPE_AUTH_TYPE_SCHANNEL:
4147 *session_key = data_blob_talloc(mem_ctx,
4148 cli->auth->a_u.schannel_auth->sess_key, 16);
4150 case PIPE_AUTH_TYPE_NTLMSSP:
4151 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4152 *session_key = data_blob_talloc(mem_ctx,
4153 cli->auth->a_u.ntlmssp_state->session_key.data,
4154 cli->auth->a_u.ntlmssp_state->session_key.length);
4156 case PIPE_AUTH_TYPE_KRB5:
4157 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4158 *session_key = data_blob_talloc(mem_ctx,
4159 cli->auth->a_u.kerberos_auth->session_key.data,
4160 cli->auth->a_u.kerberos_auth->session_key.length);
4162 case PIPE_AUTH_TYPE_NONE:
4163 *session_key = data_blob_talloc(mem_ctx,
4164 cli->auth->user_session_key.data,
4165 cli->auth->user_session_key.length);
4168 return NT_STATUS_NO_USER_SESSION_KEY;
4171 return NT_STATUS_OK;
4175 * Create a new RPC client context which uses a local dispatch function.
4177 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx, const struct ndr_syntax_id *abstract_syntax,
4178 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli, TALLOC_CTX *mem_ctx, const struct ndr_interface_table *table, uint32_t opnum, void *r),
4179 struct auth_serversupplied_info *serversupplied_info,
4180 struct rpc_pipe_client **presult)
4182 struct rpc_pipe_client *result;
4184 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
4185 if (result == NULL) {
4186 return NT_STATUS_NO_MEMORY;
4189 result->transport_type = NCACN_INTERNAL;
4191 result->abstract_syntax = *abstract_syntax;
4192 result->transfer_syntax = ndr_transfer_syntax;
4193 result->dispatch = dispatch;
4195 result->pipes_struct = TALLOC_ZERO_P(mem_ctx, pipes_struct);
4196 if (result->pipes_struct == NULL) {
4197 TALLOC_FREE(result);
4198 return NT_STATUS_NO_MEMORY;
4200 result->pipes_struct->mem_ctx = mem_ctx;
4201 result->pipes_struct->server_info = serversupplied_info;
4202 result->pipes_struct->pipe_bound = true;
4204 result->max_xmit_frag = -1;
4205 result->max_recv_frag = -1;
4208 return NT_STATUS_OK;