2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Largely rewritten by Jeremy Allison 2005.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include "librpc/gen_ndr/cli_epmapper.h"
24 #define DBGC_CLASS DBGC_RPC_CLI
/*******************************************************************
 interface/version dce/rpc pipe identification
********************************************************************/

/* Well-known MSRPC named-pipe endpoint names, in SMB "\PIPE\<service>"
 * form. The pipe_names[] table below maps these to abstract syntax ids. */
#define PIPE_SRVSVC "\\PIPE\\srvsvc"
#define PIPE_SAMR "\\PIPE\\samr"
#define PIPE_WINREG "\\PIPE\\winreg"
#define PIPE_WKSSVC "\\PIPE\\wkssvc"
#define PIPE_NETLOGON "\\PIPE\\NETLOGON"
#define PIPE_NTLSA "\\PIPE\\ntlsa"
#define PIPE_NTSVCS "\\PIPE\\ntsvcs"
#define PIPE_LSASS "\\PIPE\\lsass"
#define PIPE_LSARPC "\\PIPE\\lsarpc"
#define PIPE_SPOOLSS "\\PIPE\\spoolss"
#define PIPE_NETDFS "\\PIPE\\netdfs"
#define PIPE_ECHO "\\PIPE\\rpcecho"
#define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
/* NOTE(review): PIPE_EPM and PIPE_EPMAPPER expand to the same endpoint;
 * both are kept for source compatibility with existing callers. */
#define PIPE_EPM "\\PIPE\\epmapper"
#define PIPE_SVCCTL "\\PIPE\\svcctl"
#define PIPE_EVENTLOG "\\PIPE\\eventlog"
#define PIPE_EPMAPPER "\\PIPE\\epmapper"
#define PIPE_DRSUAPI "\\PIPE\\drsuapi"
50 * IMPORTANT!! If you update this structure, make sure to
51 * update the index #defines in smb.h.
54 static const struct pipe_id_info {
55 /* the names appear not to matter: the syntaxes _do_ matter */
57 const char *client_pipe;
58 const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
61 { PIPE_LSARPC, &ndr_table_lsarpc.syntax_id },
62 { PIPE_LSARPC, &ndr_table_dssetup.syntax_id },
63 { PIPE_SAMR, &ndr_table_samr.syntax_id },
64 { PIPE_NETLOGON, &ndr_table_netlogon.syntax_id },
65 { PIPE_SRVSVC, &ndr_table_srvsvc.syntax_id },
66 { PIPE_WKSSVC, &ndr_table_wkssvc.syntax_id },
67 { PIPE_WINREG, &ndr_table_winreg.syntax_id },
68 { PIPE_SPOOLSS, &syntax_spoolss },
69 { PIPE_NETDFS, &ndr_table_netdfs.syntax_id },
70 { PIPE_ECHO, &ndr_table_rpcecho.syntax_id },
71 { PIPE_SHUTDOWN, &ndr_table_initshutdown.syntax_id },
72 { PIPE_SVCCTL, &ndr_table_svcctl.syntax_id },
73 { PIPE_EVENTLOG, &ndr_table_eventlog.syntax_id },
74 { PIPE_NTSVCS, &ndr_table_ntsvcs.syntax_id },
75 { PIPE_EPMAPPER, &ndr_table_epmapper.syntax_id },
76 { PIPE_DRSUAPI, &ndr_table_drsuapi.syntax_id },
80 /****************************************************************************
81 Return the pipe name from the interface.
82 ****************************************************************************/
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85 struct cli_state *cli,
86 const struct ndr_syntax_id *interface)
89 for (i = 0; pipe_names[i].client_pipe; i++) {
90 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
92 return &pipe_names[i].client_pipe[5];
97 * Here we should ask \\epmapper, but for now our code is only
98 * interested in the known pipes mentioned in pipe_names[]
104 /********************************************************************
105 Map internal value to wire value.
106 ********************************************************************/
108 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
112 case PIPE_AUTH_TYPE_NONE:
113 return RPC_ANONYMOUS_AUTH_TYPE;
115 case PIPE_AUTH_TYPE_NTLMSSP:
116 return RPC_NTLMSSP_AUTH_TYPE;
118 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
119 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
120 return RPC_SPNEGO_AUTH_TYPE;
122 case PIPE_AUTH_TYPE_SCHANNEL:
123 return RPC_SCHANNEL_AUTH_TYPE;
125 case PIPE_AUTH_TYPE_KRB5:
126 return RPC_KRB5_AUTH_TYPE;
129 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
131 (unsigned int)auth_type ));
137 /********************************************************************
138 Pipe description for a DEBUG
139 ********************************************************************/
140 static char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli)
144 switch (cli->transport_type) {
146 result = talloc_asprintf(mem_ctx, "host %s, pipe %s, "
149 cli->trans.np.pipe_name,
150 (unsigned int)(cli->trans.np.fnum));
153 case NCACN_UNIX_STREAM:
154 result = talloc_asprintf(mem_ctx, "host %s, fd %d",
155 cli->desthost, cli->trans.sock.fd);
158 result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
161 SMB_ASSERT(result != NULL);
165 /********************************************************************
167 ********************************************************************/
169 static uint32 get_rpc_call_id(void)
171 static uint32 call_id = 0;
176 * Realloc pdu to have a least "size" bytes
179 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
183 if (prs_data_size(pdu) >= size) {
187 extra_size = size - prs_data_size(pdu);
189 if (!prs_force_grow(pdu, extra_size)) {
190 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
191 "%d bytes.\n", (int)extra_size));
195 DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
196 (int)extra_size, prs_data_size(pdu)));
/*
 * NOTE(review): garbled extraction -- every line keeps a stray leading
 * number and several original lines are missing. The state struct below
 * visibly lacks the data/size/num_read bookkeeping fields that the
 * completion handlers further down dereference; confirm against the full
 * source before editing.
 */
201 /*******************************************************************
202 Use SMBreadX to get rest of one fragment's worth of rpc data.
203 Reads the whole size or give an error message
204 ********************************************************************/
206 struct rpc_read_state {
207 struct event_context *ev;
208 struct rpc_pipe_client *cli;
/* Forward declarations for the per-transport completion callbacks. */
214 static void rpc_read_np_done(struct async_req *subreq);
215 static void rpc_read_sock_done(struct async_req *subreq);
/*
 * Start an async read of exactly "size" bytes of RPC data into "data".
 * Transport dispatch: named pipes (NCACN_NP) use cli_read_andx_send();
 * TCP / unix-stream sockets use recvall_send(); any other transport is
 * completed immediately with NT_STATUS_INVALID_PARAMETER.
 * NOTE(review): lines are missing from this extraction (state field
 * assignments, error/cleanup paths, returns and closing braces).
 */
217 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
218 struct event_context *ev,
219 struct rpc_pipe_client *cli,
220 char *data, size_t size)
222 struct async_req *result, *subreq;
223 struct rpc_read_state *state;
225 if (!async_req_setup(mem_ctx, &result, &state,
226 struct rpc_read_state)) {
235 DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
237 if (cli->transport_type == NCACN_NP) {
238 subreq = cli_read_andx_send(
239 state, ev, cli->trans.np.cli,
240 cli->trans.np.fnum, 0, size);
241 if (subreq == NULL) {
242 DEBUG(10, ("cli_read_andx_send failed\n"));
245 subreq->async.fn = rpc_read_np_done;
246 subreq->async.priv = result;
250 if ((cli->transport_type == NCACN_IP_TCP)
251 || (cli->transport_type == NCACN_UNIX_STREAM)) {
252 subreq = recvall_send(state, ev, cli->trans.sock.fd,
254 if (subreq == NULL) {
255 DEBUG(10, ("recvall_send failed\n"));
258 subreq->async.fn = rpc_read_sock_done;
259 subreq->async.priv = result;
/* Unknown transport: fail the request via the event loop. */
263 if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
/*
 * Completion handler for one named-pipe read chunk. Appends the received
 * bytes to state->data, and either finishes the request (all bytes read)
 * or issues the next cli_read_andx_send() for the remainder.
 * NOTE(review): the original comment explains subreq cannot be freed
 * before the memcpy because rcvbuf is a talloc child of subreq; the
 * TALLOC_FREE lines are among those missing from this extraction.
 * NT_STATUS_BUFFER_TOO_SMALL from the server is treated as success
 * (short read, more fragments follow).
 */
271 static void rpc_read_np_done(struct async_req *subreq)
273 struct async_req *req = talloc_get_type_abort(
274 subreq->async.priv, struct async_req);
275 struct rpc_read_state *state = talloc_get_type_abort(
276 req->private_data, struct rpc_read_state);
281 status = cli_read_andx_recv(subreq, &received, &rcvbuf);
283 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
286 if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
287 status = NT_STATUS_OK;
289 if (!NT_STATUS_IS_OK(status)) {
291 async_req_error(req, status);
295 memcpy(state->data + state->num_read, rcvbuf, received);
298 state->num_read += received;
300 if (state->num_read == state->size) {
/* Not done yet: request the remaining bytes. */
305 subreq = cli_read_andx_send(
306 state, state->ev, state->cli->trans.np.cli,
307 state->cli->trans.np.fnum, 0,
308 state->size - state->num_read);
310 if (async_req_nomem(subreq, req)) {
314 subreq->async.fn = rpc_read_np_done;
315 subreq->async.priv = req;
318 static void rpc_read_sock_done(struct async_req *subreq)
320 struct async_req *req = talloc_get_type_abort(
321 subreq->async.priv, struct async_req);
324 status = recvall_recv(subreq);
326 if (!NT_STATUS_IS_OK(status)) {
327 async_req_error(req, status);
334 static NTSTATUS rpc_read_recv(struct async_req *req)
336 return async_req_simple_recv(req);
/*
 * Async write state, mirror of rpc_read_state.
 * NOTE(review): the data/size/num_written fields used by the handlers
 * below are missing from this extraction -- confirm against full source.
 */
339 struct rpc_write_state {
340 struct event_context *ev;
341 struct rpc_pipe_client *cli;
/* Forward declarations for the per-transport completion callbacks. */
347 static void rpc_write_np_done(struct async_req *subreq);
348 static void rpc_write_sock_done(struct async_req *subreq);
/*
 * Start an async write of "size" bytes of RPC data from "data".
 * Named pipes use cli_write_andx_send() with write mode 8 (message mode);
 * TCP / unix-stream sockets use sendall_send(); other transports are
 * completed immediately with NT_STATUS_INVALID_PARAMETER.
 * NOTE(review): error/cleanup paths, returns and closing braces are
 * missing from this extraction.
 */
350 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
351 struct event_context *ev,
352 struct rpc_pipe_client *cli,
353 const char *data, size_t size)
355 struct async_req *result, *subreq;
356 struct rpc_write_state *state;
358 if (!async_req_setup(mem_ctx, &result, &state,
359 struct rpc_write_state)) {
366 state->num_written = 0;
368 DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
370 if (cli->transport_type == NCACN_NP) {
371 subreq = cli_write_andx_send(
372 state, ev, cli->trans.np.cli,
373 cli->trans.np.fnum, 8, /* 8 means message mode. */
374 (uint8_t *)data, 0, size);
375 if (subreq == NULL) {
376 DEBUG(10, ("cli_write_andx_send failed\n"));
379 subreq->async.fn = rpc_write_np_done;
380 subreq->async.priv = result;
384 if ((cli->transport_type == NCACN_IP_TCP)
385 || (cli->transport_type == NCACN_UNIX_STREAM)) {
386 subreq = sendall_send(state, ev, cli->trans.sock.fd,
388 if (subreq == NULL) {
389 DEBUG(10, ("sendall_send failed\n"));
392 subreq->async.fn = rpc_write_sock_done;
393 subreq->async.priv = result;
/* Unknown transport: fail the request via the event loop. */
397 if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
/*
 * Completion handler for one named-pipe write chunk. Accumulates the
 * number of bytes written; finishes the request when everything is out,
 * otherwise issues the next cli_write_andx_send() (mode 8 = message mode)
 * for the remainder.
 * NOTE(review): TALLOC_FREE(subreq), async_req_done() and returns are
 * among the lines missing from this extraction.
 */
405 static void rpc_write_np_done(struct async_req *subreq)
407 struct async_req *req = talloc_get_type_abort(
408 subreq->async.priv, struct async_req);
409 struct rpc_write_state *state = talloc_get_type_abort(
410 req->private_data, struct rpc_write_state);
414 status = cli_write_andx_recv(subreq, &written);
416 if (!NT_STATUS_IS_OK(status)) {
417 async_req_error(req, status);
421 state->num_written += written;
423 if (state->num_written == state->size) {
/* Not done yet: push the remaining bytes. */
428 subreq = cli_write_andx_send(
429 state, state->ev, state->cli->trans.np.cli,
430 state->cli->trans.np.fnum, 8,
431 (uint8_t *)(state->data + state->num_written),
432 0, state->size - state->num_written);
434 if (async_req_nomem(subreq, req)) {
438 subreq->async.fn = rpc_write_np_done;
439 subreq->async.priv = req;
442 static void rpc_write_sock_done(struct async_req *subreq)
444 struct async_req *req = talloc_get_type_abort(
445 subreq->async.priv, struct async_req);
448 status = sendall_recv(subreq);
450 if (!NT_STATUS_IS_OK(status)) {
451 async_req_error(req, status);
458 static NTSTATUS rpc_write_recv(struct async_req *req)
460 return async_req_simple_recv(req);
464 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
465 struct rpc_hdr_info *prhdr,
469 * This next call sets the endian bit correctly in current_pdu. We
470 * will propagate this to rbuf later.
473 if(!smb_io_rpc_hdr("rpc_hdr ", prhdr, pdu, 0)) {
474 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
475 return NT_STATUS_BUFFER_TOO_SMALL;
478 if (prhdr->frag_len > cli->max_recv_frag) {
479 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
480 " we only allow %d\n", (int)prhdr->frag_len,
481 (int)cli->max_recv_frag));
482 return NT_STATUS_BUFFER_TOO_SMALL;
/*
 * State for the "accumulate one complete PDU fragment" async operation.
 * NOTE(review): the pdu (prs_struct *) member referenced by the handlers
 * below is missing from this extraction.
 */
488 /****************************************************************************
489 Try and get a PDU's worth of data from current_pdu. If not, then read more
491 ****************************************************************************/
493 struct get_complete_frag_state {
494 struct event_context *ev;
495 struct rpc_pipe_client *cli;
496 struct rpc_hdr_info *prhdr;
/* Forward declarations for the two-phase (header, then rest) callbacks. */
500 static void get_complete_frag_got_header(struct async_req *subreq);
501 static void get_complete_frag_got_rest(struct async_req *subreq);
/*
 * Ensure pdu contains one complete RPC fragment. Three cases:
 *  1. fewer than RPC_HEADER_LEN bytes buffered -> grow and async-read the
 *     rest of the header (continues in get_complete_frag_got_header);
 *  2. header present but frag_len not fully buffered -> grow and
 *     async-read the remainder (continues in get_complete_frag_got_rest);
 *  3. full fragment already buffered -> complete immediately with
 *     NT_STATUS_OK via async_post_status().
 * NOTE(review): state->pdu assignment, error gotos/labels and closing
 * braces are missing from this extraction.
 */
503 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
504 struct event_context *ev,
505 struct rpc_pipe_client *cli,
506 struct rpc_hdr_info *prhdr,
509 struct async_req *result, *subreq;
510 struct get_complete_frag_state *state;
514 if (!async_req_setup(mem_ctx, &result, &state,
515 struct get_complete_frag_state)) {
520 state->prhdr = prhdr;
523 pdu_len = prs_data_size(pdu);
524 if (pdu_len < RPC_HEADER_LEN) {
525 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
526 status = NT_STATUS_NO_MEMORY;
529 subreq = rpc_read_send(state, state->ev, state->cli,
530 prs_data_p(state->pdu) + pdu_len,
531 RPC_HEADER_LEN - pdu_len);
532 if (subreq == NULL) {
533 status = NT_STATUS_NO_MEMORY;
536 subreq->async.fn = get_complete_frag_got_header;
537 subreq->async.priv = result;
/* Header is in the buffer: parse it to learn frag_len. */
541 status = parse_rpc_header(cli, prhdr, pdu);
542 if (!NT_STATUS_IS_OK(status)) {
547 * Ensure we have frag_len bytes of data.
549 if (pdu_len < prhdr->frag_len) {
550 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
551 status = NT_STATUS_NO_MEMORY;
554 subreq = rpc_read_send(state, state->ev, state->cli,
555 prs_data_p(pdu) + pdu_len,
556 prhdr->frag_len - pdu_len);
557 if (subreq == NULL) {
558 status = NT_STATUS_NO_MEMORY;
561 subreq->async.fn = get_complete_frag_got_rest;
562 subreq->async.priv = result;
/* Whole fragment already present: complete synchronously. */
566 status = NT_STATUS_OK;
568 if (async_post_status(result, ev, status)) {
/*
 * Phase-1 callback: the RPC header has been fully read. Parse it, grow
 * the buffer to frag_len, and async-read the body of the fragment
 * (continues in get_complete_frag_got_rest).
 * NOTE(review): TALLOC_FREE(subreq) and return statements are missing
 * from this extraction.
 */
575 static void get_complete_frag_got_header(struct async_req *subreq)
577 struct async_req *req = talloc_get_type_abort(
578 subreq->async.priv, struct async_req);
579 struct get_complete_frag_state *state = talloc_get_type_abort(
580 req->private_data, struct get_complete_frag_state);
583 status = rpc_read_recv(subreq);
585 if (!NT_STATUS_IS_OK(status)) {
586 async_req_error(req, status);
590 status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
591 if (!NT_STATUS_IS_OK(status)) {
592 async_req_error(req, status);
596 if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
597 async_req_error(req, NT_STATUS_NO_MEMORY);
602 * We're here in this piece of code because we've read exactly
603 * RPC_HEADER_LEN bytes into state->pdu.
606 subreq = rpc_read_send(state, state->ev, state->cli,
607 prs_data_p(state->pdu) + RPC_HEADER_LEN,
608 state->prhdr->frag_len - RPC_HEADER_LEN);
609 if (async_req_nomem(subreq, req)) {
612 subreq->async.fn = get_complete_frag_got_rest;
613 subreq->async.priv = req;
616 static void get_complete_frag_got_rest(struct async_req *subreq)
618 struct async_req *req = talloc_get_type_abort(
619 subreq->async.priv, struct async_req);
622 status = rpc_read_recv(subreq);
624 if (!NT_STATUS_IS_OK(status)) {
625 async_req_error(req, status);
631 static NTSTATUS get_complete_frag_recv(struct async_req *req)
633 return async_req_simple_recv(req);
/*
 * Verify (and for PIPE_AUTH_LEVEL_PRIVACY, unseal) the NTLMSSP auth
 * trailer on a response PDU in place. On success the prs offset is
 * restored to its entry value and *p_ss_padding_len receives the
 * sign/seal padding length the caller must strip from the data stream.
 * NTLMv2 signs the full packet (minus auth blob), hence the separate
 * full_packet_data/full_packet_data_len pointers.
 * NOTE(review): garbled extraction -- returns, closing braces and some
 * argument lines to ntlmssp_unseal_packet/ntlmssp_check_packet are
 * missing; code is not compilable as shown.
 */
636 /****************************************************************************
637 NTLMSSP specific sign/seal.
638 Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
639 In fact I should probably abstract these into identical pieces of code... JRA.
640 ****************************************************************************/
642 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
643 prs_struct *current_pdu,
644 uint8 *p_ss_padding_len)
646 RPC_HDR_AUTH auth_info;
647 uint32 save_offset = prs_offset(current_pdu);
648 uint32 auth_len = prhdr->auth_len;
649 NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
650 unsigned char *data = NULL;
652 unsigned char *full_packet_data = NULL;
653 size_t full_packet_data_len;
/* Nothing to verify at NONE/CONNECT auth levels. */
657 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
658 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
662 if (!ntlmssp_state) {
663 return NT_STATUS_INVALID_PARAMETER;
666 /* Ensure there's enough data for an authenticated response. */
667 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
668 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
669 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
670 (unsigned int)auth_len ));
671 return NT_STATUS_BUFFER_TOO_SMALL;
675 * We need the full packet data + length (minus auth stuff) as well as the packet data + length
676 * after the RPC header.
677 * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
678 * functions as NTLMv2 checks the rpc headers also.
681 data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
682 data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
684 full_packet_data = (unsigned char *)prs_data_p(current_pdu);
685 full_packet_data_len = prhdr->frag_len - auth_len;
687 /* Pull the auth header and the following data into a blob. */
688 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
689 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
690 (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
691 return NT_STATUS_BUFFER_TOO_SMALL;
694 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
695 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
696 return NT_STATUS_BUFFER_TOO_SMALL;
699 auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
700 auth_blob.length = auth_len;
702 switch (cli->auth->auth_level) {
703 case PIPE_AUTH_LEVEL_PRIVACY:
704 /* Data is encrypted. */
705 status = ntlmssp_unseal_packet(ntlmssp_state,
708 full_packet_data_len,
710 if (!NT_STATUS_IS_OK(status)) {
711 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
712 "packet from %s. Error was %s.\n",
713 rpccli_pipe_txt(debug_ctx(), cli),
714 nt_errstr(status) ));
718 case PIPE_AUTH_LEVEL_INTEGRITY:
719 /* Data is signed. */
720 status = ntlmssp_check_packet(ntlmssp_state,
723 full_packet_data_len,
725 if (!NT_STATUS_IS_OK(status)) {
726 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
727 "packet from %s. Error was %s.\n",
728 rpccli_pipe_txt(debug_ctx(), cli),
729 nt_errstr(status) ));
734 DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
735 "auth level %d\n", cli->auth->auth_level));
736 return NT_STATUS_INVALID_INFO_CLASS;
740 * Return the current pointer to the data offset.
743 if(!prs_set_offset(current_pdu, save_offset)) {
744 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
745 (unsigned int)save_offset ));
746 return NT_STATUS_BUFFER_TOO_SMALL;
750 * Remember the padding length. We must remove it from the real data
751 * stream once the sign/seal is done.
754 *p_ss_padding_len = auth_info.auth_pad_len;
/*
 * Verify (and at PRIVACY level, decrypt) the schannel auth trailer on a
 * response PDU in place via schannel_decode(). The sequence number is
 * bumped on every verified receive (it increments on send too). On
 * success the prs offset is restored and *p_ss_padding_len receives the
 * padding length to strip.
 * NOTE(review): garbled extraction -- returns, closing braces and some
 * schannel_decode() argument lines are missing.
 */
759 /****************************************************************************
760 schannel specific sign/seal.
761 ****************************************************************************/
763 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
764 prs_struct *current_pdu,
765 uint8 *p_ss_padding_len)
767 RPC_HDR_AUTH auth_info;
768 RPC_AUTH_SCHANNEL_CHK schannel_chk;
769 uint32 auth_len = prhdr->auth_len;
770 uint32 save_offset = prs_offset(current_pdu);
771 struct schannel_auth_struct *schannel_auth =
772 cli->auth->a_u.schannel_auth;
/* Nothing to verify at NONE/CONNECT auth levels. */
775 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
776 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
780 if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
781 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
782 return NT_STATUS_INVALID_PARAMETER;
785 if (!schannel_auth) {
786 return NT_STATUS_INVALID_PARAMETER;
789 /* Ensure there's enough data for an authenticated response. */
790 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
791 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
792 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
793 (unsigned int)auth_len ));
794 return NT_STATUS_INVALID_PARAMETER;
797 data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
799 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
800 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
801 (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
802 return NT_STATUS_BUFFER_TOO_SMALL;
805 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
806 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
807 return NT_STATUS_BUFFER_TOO_SMALL;
810 if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
811 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
812 auth_info.auth_type));
813 return NT_STATUS_BUFFER_TOO_SMALL;
816 if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
817 &schannel_chk, current_pdu, 0)) {
818 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
819 return NT_STATUS_BUFFER_TOO_SMALL;
822 if (!schannel_decode(schannel_auth,
823 cli->auth->auth_level,
826 prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
828 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
829 "Connection to %s.\n",
830 rpccli_pipe_txt(debug_ctx(), cli)));
831 return NT_STATUS_INVALID_PARAMETER;
834 /* The sequence number gets incremented on both send and receive. */
835 schannel_auth->seq_num++;
838 * Return the current pointer to the data offset.
841 if(!prs_set_offset(current_pdu, save_offset)) {
842 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
843 (unsigned int)save_offset ));
844 return NT_STATUS_BUFFER_TOO_SMALL;
848 * Remember the padding length. We must remove it from the real data
849 * stream once the sign/seal is done.
852 *p_ss_padding_len = auth_info.auth_pad_len;
/*
 * Dispatch auth verification for an incoming PDU based on the pipe's
 * configured auth type: NONE rejects any auth trailer, NTLMSSP and
 * SPNEGO/NTLMSSP go to cli_pipe_verify_ntlmssp(), SCHANNEL to
 * cli_pipe_verify_schannel(); KRB5 variants fall through to the
 * unknown-type error. Guards first against auth_len overflow/wrap.
 * NOTE(review): break statements and closing braces are missing from
 * this extraction.
 */
857 /****************************************************************************
858 Do the authentication checks on an incoming pdu. Check sign and unseal etc.
859 ****************************************************************************/
861 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
862 prs_struct *current_pdu,
863 uint8 *p_ss_padding_len)
865 NTSTATUS ret = NT_STATUS_OK;
867 /* Paranioa checks for auth_len. */
868 if (prhdr->auth_len) {
869 if (prhdr->auth_len > prhdr->frag_len) {
870 return NT_STATUS_INVALID_PARAMETER;
873 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
874 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
875 /* Integer wrap attempt. */
876 return NT_STATUS_INVALID_PARAMETER;
881 * Now we have a complete RPC request PDU fragment, try and verify any auth data.
884 switch(cli->auth->auth_type) {
885 case PIPE_AUTH_TYPE_NONE:
886 if (prhdr->auth_len) {
887 DEBUG(3, ("cli_pipe_validate_rpc_response: "
888 "Connection to %s - got non-zero "
890 rpccli_pipe_txt(debug_ctx(), cli),
891 (unsigned int)prhdr->auth_len ));
892 return NT_STATUS_INVALID_PARAMETER;
896 case PIPE_AUTH_TYPE_NTLMSSP:
897 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
898 ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
899 if (!NT_STATUS_IS_OK(ret)) {
904 case PIPE_AUTH_TYPE_SCHANNEL:
905 ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
906 if (!NT_STATUS_IS_OK(ret)) {
911 case PIPE_AUTH_TYPE_KRB5:
912 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
914 DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
915 "to %s - unknown internal auth type %u.\n",
916 rpccli_pipe_txt(debug_ctx(), cli),
917 cli->auth->auth_type ));
918 return NT_STATUS_INVALID_INFO_CLASS;
/*
 * Validate one complete incoming PDU: check its length matches frag_len,
 * then per packet type -- responses get sign/seal verification and the
 * out-params (*ppdata/*pdata_len, presumably -- the parameter lines are
 * missing here, confirm against full source) are pointed at the NDR data
 * with ss padding and auth footer stripped; bind-NACKs and faults map to
 * error NTSTATUSes; unexpected types are rejected. Finally works around
 * servers (AS/U) that forget FIRST/LAST flags on bind-acks.
 * NOTE(review): garbled extraction -- case labels (RPC_BINDACK,
 * RPC_RESPONSE, RPC_BINDNACK, RPC_FAULT), braces and breaks are missing.
 */
924 /****************************************************************************
925 Do basic authentication checks on an incoming pdu.
926 ****************************************************************************/
928 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
929 prs_struct *current_pdu,
930 uint8 expected_pkt_type,
933 prs_struct *return_data)
936 NTSTATUS ret = NT_STATUS_OK;
937 uint32 current_pdu_len = prs_data_size(current_pdu);
939 if (current_pdu_len != prhdr->frag_len) {
940 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
941 (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
942 return NT_STATUS_INVALID_PARAMETER;
946 * Point the return values at the real data including the RPC
947 * header. Just in case the caller wants it.
949 *ppdata = prs_data_p(current_pdu);
950 *pdata_len = current_pdu_len;
952 /* Ensure we have the correct type. */
953 switch (prhdr->pkt_type) {
954 case RPC_ALTCONTRESP:
957 /* Alter context and bind ack share the same packet definitions. */
963 RPC_HDR_RESP rhdr_resp;
964 uint8 ss_padding_len = 0;
966 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
967 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
968 return NT_STATUS_BUFFER_TOO_SMALL;
971 /* Here's where we deal with incoming sign/seal. */
972 ret = cli_pipe_validate_rpc_response(cli, prhdr,
973 current_pdu, &ss_padding_len);
974 if (!NT_STATUS_IS_OK(ret)) {
978 /* Point the return values at the NDR data. Remember to remove any ss padding. */
979 *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
981 if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
982 return NT_STATUS_BUFFER_TOO_SMALL;
985 *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
987 /* Remember to remove the auth footer. */
988 if (prhdr->auth_len) {
989 /* We've already done integer wrap tests on auth_len in
990 cli_pipe_validate_rpc_response(). */
991 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
992 return NT_STATUS_BUFFER_TOO_SMALL;
994 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
997 DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
998 current_pdu_len, *pdata_len, ss_padding_len ));
1001 * If this is the first reply, and the allocation hint is reasonably, try and
1002 * set up the return_data parse_struct to the correct size.
1005 if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
1006 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
1007 DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
1008 "too large to allocate\n",
1009 (unsigned int)rhdr_resp.alloc_hint ));
1010 return NT_STATUS_NO_MEMORY;
1018 DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
1019 "received from %s!\n",
1020 rpccli_pipe_txt(debug_ctx(), cli)));
1021 /* Use this for now... */
1022 return NT_STATUS_NETWORK_ACCESS_DENIED;
1026 RPC_HDR_RESP rhdr_resp;
1027 RPC_HDR_FAULT fault_resp;
1029 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
1030 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
1031 return NT_STATUS_BUFFER_TOO_SMALL;
1034 if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
1035 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
1036 return NT_STATUS_BUFFER_TOO_SMALL;
1039 DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
1040 "code %s received from %s!\n",
1041 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
1042 rpccli_pipe_txt(debug_ctx(), cli)));
1043 if (NT_STATUS_IS_OK(fault_resp.status)) {
1044 return NT_STATUS_UNSUCCESSFUL;
1046 return fault_resp.status;
1051 DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
1053 (unsigned int)prhdr->pkt_type,
1054 rpccli_pipe_txt(debug_ctx(), cli)));
1055 return NT_STATUS_INVALID_INFO_CLASS;
1058 if (prhdr->pkt_type != expected_pkt_type) {
1059 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
1060 "got an unexpected RPC packet type - %u, not %u\n",
1061 rpccli_pipe_txt(debug_ctx(), cli),
1063 expected_pkt_type));
1064 return NT_STATUS_INVALID_INFO_CLASS;
1067 /* Do this just before return - we don't want to modify any rpc header
1068 data before now as we may have needed to do cryptographic actions on
1071 if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
1072 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
1073 "setting fragment first/last ON.\n"));
1074 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
1077 return NT_STATUS_OK;
1080 /****************************************************************************
1081 Ensure we eat the just processed pdu from the current_pdu prs_struct.
1082 Normally the frag_len and buffer size will match, but on the first trans
1083 reply there is a theoretical chance that buffer size > frag_len, so we must
1085 ****************************************************************************/
1087 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
1089 uint32 current_pdu_len = prs_data_size(current_pdu);
1091 if (current_pdu_len < prhdr->frag_len) {
1092 return NT_STATUS_BUFFER_TOO_SMALL;
1096 if (current_pdu_len == (uint32)prhdr->frag_len) {
1097 prs_mem_free(current_pdu);
1098 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1099 /* Make current_pdu dynamic with no memory. */
1100 prs_give_memory(current_pdu, 0, 0, True);
1101 return NT_STATUS_OK;
1105 * Oh no ! More data in buffer than we processed in current pdu.
1106 * Cheat. Move the data down and shrink the buffer.
1109 memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1110 current_pdu_len - prhdr->frag_len);
1112 /* Remember to set the read offset back to zero. */
1113 prs_set_offset(current_pdu, 0);
1115 /* Shrink the buffer. */
1116 if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1117 return NT_STATUS_BUFFER_TOO_SMALL;
1120 return NT_STATUS_OK;
/*
 * State for the transact-style "send request, read reply" operation.
 * NOTE(review): the rdata/rdata_len result fields used by the handlers
 * below are missing from this extraction.
 */
1123 /****************************************************************************
1124 Call a remote api on an arbitrary pipe. takes param, data and setup buffers.
1125 ****************************************************************************/
1127 struct cli_api_pipe_state {
1128 struct event_context *ev;
1129 struct rpc_pipe_client *cli;
1130 uint32_t max_rdata_len;
/* Forward declarations for per-transport completion callbacks. */
1135 static void cli_api_pipe_np_trans_done(struct async_req *subreq);
1136 static void cli_api_pipe_sock_send_done(struct async_req *subreq);
1137 static void cli_api_pipe_sock_read_done(struct async_req *subreq);
/*
 * Send an RPC request and arrange to read the reply. Named pipes use a
 * single SMBtrans (TRANSACT_DCERPCCMD on "\PIPE\"); sockets first
 * sendall() the request, then read an RPC_HEADER_LEN reply header in the
 * sock_send_done callback -- hence the up-front check that
 * max_rdata_len >= RPC_HEADER_LEN. Unknown transports are completed with
 * NT_STATUS_INVALID_PARAMETER.
 * NOTE(review): setup[] declaration, gotos/returns and closing braces
 * are missing from this extraction.
 */
1139 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1140 struct event_context *ev,
1141 struct rpc_pipe_client *cli,
1142 uint8_t *data, size_t data_len,
1143 uint32_t max_rdata_len)
1145 struct async_req *result, *subreq;
1146 struct cli_api_pipe_state *state;
1149 if (!async_req_setup(mem_ctx, &result, &state,
1150 struct cli_api_pipe_state)) {
1155 state->max_rdata_len = max_rdata_len;
1157 if (state->max_rdata_len < RPC_HEADER_LEN) {
1159 * For a RPC reply we always need at least RPC_HEADER_LEN
1160 * bytes. We check this here because we will receive
1161 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1163 status = NT_STATUS_INVALID_PARAMETER;
1167 if (cli->transport_type == NCACN_NP) {
1170 SSVAL(setup+0, 0, TRANSACT_DCERPCCMD);
1171 SSVAL(setup+1, 0, cli->trans.np.fnum);
1173 subreq = cli_trans_send(
1174 state, ev, cli->trans.np.cli, SMBtrans,
1175 "\\PIPE\\", 0, 0, 0, setup, 2, 0,
1176 NULL, 0, 0, data, data_len, max_rdata_len);
1177 if (subreq == NULL) {
1178 status = NT_STATUS_NO_MEMORY;
1181 subreq->async.fn = cli_api_pipe_np_trans_done;
1182 subreq->async.priv = result;
1186 if ((cli->transport_type == NCACN_IP_TCP)
1187 || (cli->transport_type == NCACN_UNIX_STREAM)) {
1188 subreq = sendall_send(state, ev, cli->trans.sock.fd,
1190 if (subreq == NULL) {
1191 status = NT_STATUS_NO_MEMORY;
1194 subreq->async.fn = cli_api_pipe_sock_send_done;
1195 subreq->async.priv = result;
1199 status = NT_STATUS_INVALID_PARAMETER;
1202 if (async_post_status(result, ev, status)) {
1205 TALLOC_FREE(result);
/*
 * Completion callback for the named-pipe (SMBtrans) path: pull the
 * returned data bytes into state->rdata and finish the request.
 */
1209 static void cli_api_pipe_np_trans_done(struct async_req *subreq)
1211 struct async_req *req = talloc_get_type_abort(
1212 subreq->async.priv, struct async_req);
1213 struct cli_api_pipe_state *state = talloc_get_type_abort(
1214 req->private_data, struct cli_api_pipe_state);
1217 status = cli_trans_recv(subreq, state, NULL, NULL, NULL, NULL,
1218 &state->rdata, &state->rdata_len);
1219 TALLOC_FREE(subreq);
1220 if (!NT_STATUS_IS_OK(status)) {
1221 async_req_error(req, status);
1224 async_req_done(req);
/*
 * Socket path, step 2: the outgoing fragment has been fully written.
 * Allocate a buffer for exactly RPC_HEADER_LEN bytes and start reading
 * the reply's RPC header (the caller reads the rest of the PDU later).
 */
1227 static void cli_api_pipe_sock_send_done(struct async_req *subreq)
1229 struct async_req *req = talloc_get_type_abort(
1230 subreq->async.priv, struct async_req);
1231 struct cli_api_pipe_state *state = talloc_get_type_abort(
1232 req->private_data, struct cli_api_pipe_state);
1235 status = sendall_recv(subreq);
1236 TALLOC_FREE(subreq);
1237 if (!NT_STATUS_IS_OK(status)) {
1238 async_req_error(req, status);
1242 state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1243 if (async_req_nomem(state->rdata, req)) {
1246 state->rdata_len = RPC_HEADER_LEN;
1248 subreq = recvall_send(state, state->ev, state->cli->trans.sock.fd,
1249 state->rdata, RPC_HEADER_LEN, 0);
1250 if (async_req_nomem(subreq, req)) {
1253 subreq->async.fn = cli_api_pipe_sock_read_done;
1254 subreq->async.priv = req;
/*
 * Socket path, step 3: the RPC header bytes have arrived in
 * state->rdata; mark the request complete.
 */
1257 static void cli_api_pipe_sock_read_done(struct async_req *subreq)
1259 struct async_req *req = talloc_get_type_abort(
1260 subreq->async.priv, struct async_req);
1263 status = recvall_recv(subreq);
1264 TALLOC_FREE(subreq);
1265 if (!NT_STATUS_IS_OK(status)) {
1266 async_req_error(req, status);
1269 async_req_done(req);
/*
 * Collect the result of cli_api_pipe_send().  On success, ownership of
 * the reply buffer is moved (talloc_move) onto the caller's mem_ctx.
 */
1272 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1273 uint8_t **prdata, uint32_t *prdata_len)
1275 struct cli_api_pipe_state *state = talloc_get_type_abort(
1276 req->private_data, struct cli_api_pipe_state);
1279 if (async_req_is_error(req, &status)) {
1283 *prdata = talloc_move(mem_ctx, &state->rdata);
1284 *prdata_len = state->rdata_len;
1285 return NT_STATUS_OK;
1288 /****************************************************************************
1289 Send data on an rpc pipe via trans. The prs_struct data must be the last
1290 pdu fragment of an NDR data stream.
1292 Receive response data from an rpc pipe, which may be large...
1294 Read the first fragment: unfortunately have to use SMBtrans for the first
1295 bit, then SMBreadX for subsequent bits.
1297 If first fragment received also wasn't the last fragment, continue
1298 getting fragments until we _do_ receive the last fragment.
1300 Request/Response PDUs look like the following...
1302 |<------------------PDU len----------------------------------------------->|
1303 |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1305 +------------+-----------------+-------------+---------------+-------------+
1306 | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
1307 +------------+-----------------+-------------+---------------+-------------+
1309 Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1310 signing & sealing being negotiated.
1312 ****************************************************************************/
/*
 * State for rpc_api_pipe_send/recv: reassembles a full RPC reply out
 * of individual fragments.  incoming_frag holds the fragment currently
 * being received, incoming_pdu accumulates the complete reply, and
 * incoming_pdu_offset is the next write position inside it.
 */
1314 struct rpc_api_pipe_state {
1315 struct event_context *ev;
1316 struct rpc_pipe_client *cli;
1317 uint8_t expected_pkt_type;
1319 prs_struct incoming_frag;
1320 struct rpc_hdr_info rhdr;
1322 prs_struct incoming_pdu; /* Incoming reply */
1323 uint32_t incoming_pdu_offset;
/*
 * talloc destructor: release the prs_struct buffers, which are not
 * themselves talloc children of the state.
 */
1326 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1328 prs_mem_free(&state->incoming_frag);
1329 prs_mem_free(&state->incoming_pdu);
1333 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1334 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
/*
 * Send one outgoing PDU (the last fragment of a request) and start
 * collecting the possibly multi-fragment reply.  The reply is checked
 * against expected_pkt_type as fragments arrive.  Completion is picked
 * up with rpc_api_pipe_recv().
 */
1336 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1337 struct event_context *ev,
1338 struct rpc_pipe_client *cli,
1339 prs_struct *data, /* Outgoing PDU */
1340 uint8_t expected_pkt_type)
1342 struct async_req *result, *subreq;
1343 struct rpc_api_pipe_state *state;
1344 uint16_t max_recv_frag;
1347 if (!async_req_setup(mem_ctx, &result, &state,
1348 struct rpc_api_pipe_state)) {
1353 state->expected_pkt_type = expected_pkt_type;
1354 state->incoming_pdu_offset = 0;
1356 prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1358 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1359 /* Make incoming_pdu dynamic with no memory. */
1360 prs_give_memory(&state->incoming_pdu, 0, 0, true);
1362 talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1365 * Ensure we're not sending too much.
1367 if (prs_offset(data) > cli->max_xmit_frag) {
1368 status = NT_STATUS_INVALID_PARAMETER;
1372 DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1374 max_recv_frag = cli->max_recv_frag;
/* Torture path: deliberately pick a tiny receive size to exercise
 * multi-fragment reassembly.  Presumably guarded by a developer
 * option on the elided line above -- confirm in the full source. */
1377 max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1380 subreq = cli_api_pipe_send(state, ev, cli, (uint8_t *)prs_data_p(data),
1381 prs_offset(data), max_recv_frag);
1382 if (subreq == NULL) {
1383 status = NT_STATUS_NO_MEMORY;
1386 subreq->async.fn = rpc_api_pipe_trans_done;
1387 subreq->async.priv = result;
1391 if (async_post_status(result, ev, status)) {
1394 TALLOC_FREE(result);
/*
 * First chunk of the reply arrived via cli_api_pipe.  Copy it into
 * incoming_frag (prs_struct cannot take talloc'ed memory, hence the
 * memdup) and ask get_complete_frag to finish reading the fragment.
 */
1398 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1400 struct async_req *req = talloc_get_type_abort(
1401 subreq->async.priv, struct async_req);
1402 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1403 req->private_data, struct rpc_api_pipe_state);
1405 uint8_t *rdata = NULL;
1406 uint32_t rdata_len = 0;
1409 status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1410 TALLOC_FREE(subreq);
1411 if (!NT_STATUS_IS_OK(status)) {
1412 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1413 async_req_error(req, status);
/* An empty reply is treated as success with a zero-length PDU. */
1417 if (rdata == NULL) {
1418 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1419 rpccli_pipe_txt(debug_ctx(), state->cli)));
1420 async_req_done(req);
1425 * Give the memory received from cli_trans as dynamic to the current
1426 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1429 rdata_copy = (char *)memdup(rdata, rdata_len);
1431 if (async_req_nomem(rdata_copy, req)) {
1434 prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1436 /* Ensure we have enough data for a pdu. */
1437 subreq = get_complete_frag_send(state, state->ev, state->cli,
1438 &state->rhdr, &state->incoming_frag);
1439 if (async_req_nomem(subreq, req)) {
1442 subreq->async.fn = rpc_api_pipe_got_pdu;
1443 subreq->async.priv = req;
/*
 * A complete fragment is available in incoming_frag.  Validate it,
 * fix up endianness on the first fragment, append its data portion to
 * incoming_pdu, and either finish (RPC_FLG_LAST) or loop back to fetch
 * the next fragment.
 */
1446 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1448 struct async_req *req = talloc_get_type_abort(
1449 subreq->async.priv, struct async_req);
1450 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1451 req->private_data, struct rpc_api_pipe_state);
1454 uint32_t rdata_len = 0;
1456 status = get_complete_frag_recv(subreq);
1457 TALLOC_FREE(subreq);
1458 if (!NT_STATUS_IS_OK(status)) {
1459 DEBUG(5, ("get_complete_frag failed: %s\n",
1460 nt_errstr(status)));
1461 async_req_error(req, status);
/* Validate the header/auth trailer and locate the data portion. */
1465 status = cli_pipe_validate_current_pdu(
1466 state->cli, &state->rhdr, &state->incoming_frag,
1467 state->expected_pkt_type, &rdata, &rdata_len,
1468 &state->incoming_pdu);
1470 DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1471 (unsigned)prs_data_size(&state->incoming_frag),
1472 (unsigned)state->incoming_pdu_offset,
1473 nt_errstr(status)));
1475 if (!NT_STATUS_IS_OK(status)) {
1476 async_req_error(req, status);
/* First fragment: pack_type[0] == 0 marks big-endian reply data. */
1480 if ((state->rhdr.flags & RPC_FLG_FIRST)
1481 && (state->rhdr.pack_type[0] == 0)) {
1483 * Set the data type correctly for big-endian data on the
1486 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1488 rpccli_pipe_txt(debug_ctx(), state->cli)));
1489 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1492 * Check endianness on subsequent packets.
1494 if (state->incoming_frag.bigendian_data
1495 != state->incoming_pdu.bigendian_data) {
1496 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1498 state->incoming_pdu.bigendian_data?"big":"little",
1499 state->incoming_frag.bigendian_data?"big":"little"));
1500 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
1504 /* Now copy the data portion out of the pdu into rbuf. */
1505 if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1506 async_req_error(req, NT_STATUS_NO_MEMORY);
1510 memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1511 rdata, (size_t)rdata_len);
1512 state->incoming_pdu_offset += rdata_len;
/* Drop the consumed fragment so the next one starts clean. */
1514 status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1515 &state->incoming_frag);
1516 if (!NT_STATUS_IS_OK(status)) {
1517 async_req_error(req, status);
1521 if (state->rhdr.flags & RPC_FLG_LAST) {
1522 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1523 rpccli_pipe_txt(debug_ctx(), state->cli),
1524 (unsigned)prs_data_size(&state->incoming_pdu)));
1525 async_req_done(req);
/* Not the last fragment: queue a read for the next one. */
1529 subreq = get_complete_frag_send(state, state->ev, state->cli,
1530 &state->rhdr, &state->incoming_frag);
1531 if (async_req_nomem(subreq, req)) {
1534 subreq->async.fn = rpc_api_pipe_got_pdu;
1535 subreq->async.priv = req;
/*
 * Hand the reassembled reply PDU to the caller.  The prs_struct is
 * copied by value and re-parented onto mem_ctx; incoming_pdu is then
 * re-initialized so the state destructor cannot free the handed-over
 * buffer.
 */
1538 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1539 prs_struct *reply_pdu)
1541 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1542 req->private_data, struct rpc_api_pipe_state);
1545 if (async_req_is_error(req, &status)) {
1549 *reply_pdu = state->incoming_pdu;
1550 reply_pdu->mem_ctx = mem_ctx;
1553 * Prevent state->incoming_pdu from being freed in
1554 * rpc_api_pipe_state_destructor()
1556 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1558 return NT_STATUS_OK;
1561 /*******************************************************************
1562 Creates krb5 auth bind.
1563 ********************************************************************/
/*
 * Build the Kerberos auth verifier for a bind request: fetch a service
 * ticket for the stored principal, wrap it in a GSS-API blob and
 * marshall it into auth_data.  pauth_out gets the RPC_HDR_AUTH header
 * (pad length may still be adjusted by the caller before marshalling).
 * On any failure the partially-filled auth_data is freed here.
 */
1565 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1566 enum pipe_auth_level auth_level,
1567 RPC_HDR_AUTH *pauth_out,
1568 prs_struct *auth_data)
1572 struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1573 DATA_BLOB tkt = data_blob_null;
1574 DATA_BLOB tkt_wrapped = data_blob_null;
1576 /* We may change the pad length before marshalling. */
1577 init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1579 DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1580 a->service_principal ));
1582 /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1584 ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1585 &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1588 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1590 a->service_principal,
1591 error_message(ret) ));
1593 data_blob_free(&tkt);
1594 prs_mem_free(auth_data);
1595 return NT_STATUS_INVALID_PARAMETER;
1598 /* wrap that up in a nice GSS-API wrapping */
1599 tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1601 data_blob_free(&tkt);
1603 /* Auth len in the rpc header doesn't include auth_header. */
1604 if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1605 data_blob_free(&tkt_wrapped);
1606 prs_mem_free(auth_data);
1607 return NT_STATUS_NO_MEMORY;
1610 DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1611 dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1613 data_blob_free(&tkt_wrapped);
1614 return NT_STATUS_OK;
/* Reached when the build has no Kerberos support (#ifdef'ed out). */
1616 return NT_STATUS_INVALID_PARAMETER;
1620 /*******************************************************************
1621 Creates SPNEGO NTLMSSP auth bind.
1622 ********************************************************************/
/*
 * Build the SPNEGO-wrapped NTLMSSP negotiate token for a bind request.
 * Runs the first ntlmssp_update() step, wraps the resulting NTLMSSP
 * blob in a SPNEGO negTokenInit and marshalls it into auth_data.
 */
1624 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1625 enum pipe_auth_level auth_level,
1626 RPC_HDR_AUTH *pauth_out,
1627 prs_struct *auth_data)
1630 DATA_BLOB null_blob = data_blob_null;
1631 DATA_BLOB request = data_blob_null;
1632 DATA_BLOB spnego_msg = data_blob_null;
1634 /* We may change the pad length before marshalling. */
1635 init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1637 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1638 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
/* MORE_PROCESSING_REQUIRED is the expected result for the first leg. */
1642 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1643 data_blob_free(&request);
1644 prs_mem_free(auth_data);
1648 /* Wrap this in SPNEGO. */
1649 spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1651 data_blob_free(&request);
1653 /* Auth len in the rpc header doesn't include auth_header. */
1654 if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1655 data_blob_free(&spnego_msg);
1656 prs_mem_free(auth_data);
1657 return NT_STATUS_NO_MEMORY;
1660 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1661 dump_data(5, spnego_msg.data, spnego_msg.length);
1663 data_blob_free(&spnego_msg);
1664 return NT_STATUS_OK;
1667 /*******************************************************************
1668 Creates NTLMSSP auth bind.
1669 ********************************************************************/
/*
 * Build the raw (non-SPNEGO) NTLMSSP negotiate token for a bind
 * request and marshall it into auth_data.  Same first-leg pattern as
 * the SPNEGO variant above, minus the negTokenInit wrapping.
 */
1671 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1672 enum pipe_auth_level auth_level,
1673 RPC_HDR_AUTH *pauth_out,
1674 prs_struct *auth_data)
1677 DATA_BLOB null_blob = data_blob_null;
1678 DATA_BLOB request = data_blob_null;
1680 /* We may change the pad length before marshalling. */
1681 init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1683 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1684 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
/* MORE_PROCESSING_REQUIRED is the expected result for the first leg. */
1688 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1689 data_blob_free(&request);
1690 prs_mem_free(auth_data);
1694 /* Auth len in the rpc header doesn't include auth_header. */
1695 if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1696 data_blob_free(&request);
1697 prs_mem_free(auth_data);
1698 return NT_STATUS_NO_MEMORY;
1701 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1702 dump_data(5, request.data, request.length);
1704 data_blob_free(&request);
1705 return NT_STATUS_OK;
1708 /*******************************************************************
1709 Creates schannel auth bind.
1710 ********************************************************************/
/*
 * Build the schannel negotiate blob for a bind request: fill in the
 * domain (falling back to lp_workgroup()) and marshall the
 * RPC_AUTH_SCHANNEL_NEG structure into auth_data.
 */
1712 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1713 enum pipe_auth_level auth_level,
1714 RPC_HDR_AUTH *pauth_out,
1715 prs_struct *auth_data)
1717 RPC_AUTH_SCHANNEL_NEG schannel_neg;
1719 /* We may change the pad length before marshalling. */
1720 init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1722 /* Use lp_workgroup() if domain not specified */
1724 if (!cli->auth->domain || !cli->auth->domain[0]) {
1725 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1726 if (cli->auth->domain == NULL) {
1727 return NT_STATUS_NO_MEMORY;
1731 init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1735 * Now marshall the data into the auth parse_struct.
1738 if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1739 &schannel_neg, auth_data, 0)) {
1740 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1741 prs_mem_free(auth_data);
1742 return NT_STATUS_NO_MEMORY;
1745 return NT_STATUS_OK;
1748 /*******************************************************************
1749 Creates the internals of a DCE/RPC bind request or alter context PDU.
1750 ********************************************************************/
/*
 * Marshall the common body of a bind or alter-context PDU into
 * rpc_out: RPC header, RPC_HDR_RB with a single presentation context,
 * then (when auth data is present) sign/seal padding, the auth header
 * and the auth blob.  frag_len is computed up front so the header can
 * carry the final length.
 */
1752 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1753 prs_struct *rpc_out,
1755 const RPC_IFACE *abstract,
1756 const RPC_IFACE *transfer,
1757 RPC_HDR_AUTH *phdr_auth,
1758 prs_struct *pauth_info)
1762 RPC_CONTEXT rpc_ctx;
1763 uint16 auth_len = prs_offset(pauth_info);
1764 uint8 ss_padding_len = 0;
1765 uint16 frag_len = 0;
1767 /* create the RPC context. */
1768 init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1770 /* create the bind request RPC_HDR_RB */
1771 init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1773 /* Start building the frag length. */
1774 frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1776 /* Do we need to pad ? */
/* NOTE(review): padding computed as 8 - (len % 8); presumably guarded
 * by a (len % 8) check on an elided line -- confirm in full source. */
1778 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1780 ss_padding_len = 8 - (data_len % 8);
1781 phdr_auth->auth_pad_len = ss_padding_len;
1783 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1786 /* Create the request RPC_HDR */
1787 init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1789 /* Marshall the RPC header */
1790 if(!smb_io_rpc_hdr("hdr" , &hdr, rpc_out, 0)) {
1791 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1792 return NT_STATUS_NO_MEMORY;
1795 /* Marshall the bind request data */
1796 if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1797 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1798 return NT_STATUS_NO_MEMORY;
1802 * Grow the outgoing buffer to store any auth info.
1806 if (ss_padding_len) {
1808 memset(pad, '\0', 8);
1809 if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1810 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1811 return NT_STATUS_NO_MEMORY;
1815 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1816 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1817 return NT_STATUS_NO_MEMORY;
1821 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1822 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1823 return NT_STATUS_NO_MEMORY;
1827 return NT_STATUS_OK;
1830 /*******************************************************************
1831 Creates a DCE/RPC bind request.
1832 ********************************************************************/
/*
 * Build a complete RPC bind request for the given auth type/level:
 * create the per-mechanism auth blob first, then hand it to
 * create_bind_or_alt_ctx_internal() to marshall the full PDU into
 * rpc_out.  auth_info is always freed before returning.
 */
1834 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1835 prs_struct *rpc_out,
1837 const RPC_IFACE *abstract,
1838 const RPC_IFACE *transfer,
1839 enum pipe_auth_type auth_type,
1840 enum pipe_auth_level auth_level)
1842 RPC_HDR_AUTH hdr_auth;
1843 prs_struct auth_info;
1844 NTSTATUS ret = NT_STATUS_OK;
1846 ZERO_STRUCT(hdr_auth);
1847 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1848 return NT_STATUS_NO_MEMORY;
1850 switch (auth_type) {
1851 case PIPE_AUTH_TYPE_SCHANNEL:
1852 ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1853 if (!NT_STATUS_IS_OK(ret)) {
1854 prs_mem_free(&auth_info);
1859 case PIPE_AUTH_TYPE_NTLMSSP:
1860 ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1861 if (!NT_STATUS_IS_OK(ret)) {
1862 prs_mem_free(&auth_info);
1867 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1868 ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1869 if (!NT_STATUS_IS_OK(ret)) {
1870 prs_mem_free(&auth_info);
1875 case PIPE_AUTH_TYPE_KRB5:
1876 ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1877 if (!NT_STATUS_IS_OK(ret)) {
1878 prs_mem_free(&auth_info);
1883 case PIPE_AUTH_TYPE_NONE:
/* Anonymous bind: auth_info stays empty. */
1887 /* "Can't" happen. */
1888 return NT_STATUS_INVALID_INFO_CLASS;
1891 ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1899 prs_mem_free(&auth_info);
1903 /*******************************************************************
1904 Create and add the NTLMSSP sign/seal auth header and data.
1905 ********************************************************************/
/*
 * Append the NTLMSSP auth trailer to an outgoing request PDU:
 * marshall the RPC_HDR_AUTH, then seal (PRIVACY) or sign (INTEGRITY)
 * the data+padding portion in place, and finally append the signature
 * blob (NTLMSSP_SIG_SIZE bytes).
 */
1907 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1909 uint32 ss_padding_len,
1910 prs_struct *outgoing_pdu)
1912 RPC_HDR_AUTH auth_info;
1914 DATA_BLOB auth_blob = data_blob_null;
1915 uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1917 if (!cli->auth->a_u.ntlmssp_state) {
1918 return NT_STATUS_INVALID_PARAMETER;
1921 /* Init and marshall the auth header. */
1922 init_rpc_hdr_auth(&auth_info,
1923 map_pipe_auth_type_to_rpc_auth_type(
1924 cli->auth->auth_type),
1925 cli->auth->auth_level,
1927 1 /* context id. */);
1929 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1930 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1931 data_blob_free(&auth_blob);
1932 return NT_STATUS_NO_MEMORY;
1935 switch (cli->auth->auth_level) {
1936 case PIPE_AUTH_LEVEL_PRIVACY:
1937 /* Data portion is encrypted. */
1938 status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1939 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1941 (unsigned char *)prs_data_p(outgoing_pdu),
1942 (size_t)prs_offset(outgoing_pdu),
1944 if (!NT_STATUS_IS_OK(status)) {
1945 data_blob_free(&auth_blob);
1950 case PIPE_AUTH_LEVEL_INTEGRITY:
1951 /* Data is signed. */
1952 status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1953 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1955 (unsigned char *)prs_data_p(outgoing_pdu),
1956 (size_t)prs_offset(outgoing_pdu),
1958 if (!NT_STATUS_IS_OK(status)) {
1959 data_blob_free(&auth_blob);
/* Any other auth level is a caller bug. */
1966 smb_panic("bad auth level");
1968 return NT_STATUS_INVALID_PARAMETER;
1971 /* Finally marshall the blob. */
1973 if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1974 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1975 (unsigned int)NTLMSSP_SIG_SIZE));
1976 data_blob_free(&auth_blob);
1977 return NT_STATUS_NO_MEMORY;
1980 data_blob_free(&auth_blob);
1981 return NT_STATUS_OK;
1984 /*******************************************************************
1985 Create and add the schannel sign/seal auth header and data.
1986 ********************************************************************/
/*
 * Append the schannel auth trailer to an outgoing request PDU:
 * marshall the RPC_HDR_AUTH, encode (sign or sign+seal, depending on
 * auth_level) the data+padding portion via schannel_encode(), then
 * marshall the RPC_AUTH_SCHANNEL_CHK verifier.
 */
1988 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1990 uint32 ss_padding_len,
1991 prs_struct *outgoing_pdu)
1993 RPC_HDR_AUTH auth_info;
1994 RPC_AUTH_SCHANNEL_CHK verf;
1995 struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1996 char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1997 size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
2000 return NT_STATUS_INVALID_PARAMETER;
2003 /* Init and marshall the auth header. */
2004 init_rpc_hdr_auth(&auth_info,
2005 map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
2006 cli->auth->auth_level,
2008 1 /* context id. */);
2010 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
2011 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
2012 return NT_STATUS_NO_MEMORY;
2015 switch (cli->auth->auth_level) {
2016 case PIPE_AUTH_LEVEL_PRIVACY:
2017 case PIPE_AUTH_LEVEL_INTEGRITY:
2018 DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
2021 schannel_encode(sas,
2022 cli->auth->auth_level,
2023 SENDER_IS_INITIATOR,
/* Any other auth level is a caller bug. */
2033 smb_panic("bad auth level");
2035 return NT_STATUS_INVALID_PARAMETER;
2038 /* Finally marshall the blob. */
2039 smb_io_rpc_auth_schannel_chk("",
2040 RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
2045 return NT_STATUS_OK;
2048 /*******************************************************************
2049 Calculate how much data we're going to send in this packet, also
2050 work out any sign/seal padding length.
2051 ********************************************************************/
/*
 * Work out how many payload bytes fit into the next request fragment,
 * plus the resulting fragment length, auth trailer length and
 * sign/seal padding.  Unauthenticated levels use the whole
 * max_xmit_frag minus headers; signed/sealed levels also reserve room
 * for the auth header and the mechanism's signature.
 */
2053 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
2057 uint32 *p_ss_padding)
2059 uint32 data_space, data_len;
/* Torture path: randomly halve the payload to force extra fragments.
 * Presumably guarded by a developer option on an elided line. */
2062 if ((data_left > 0) && (sys_random() % 2)) {
2063 data_left = MAX(data_left/2, 1);
2067 switch (cli->auth->auth_level) {
2068 case PIPE_AUTH_LEVEL_NONE:
2069 case PIPE_AUTH_LEVEL_CONNECT:
2070 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
2071 data_len = MIN(data_space, data_left);
2074 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
2077 case PIPE_AUTH_LEVEL_INTEGRITY:
2078 case PIPE_AUTH_LEVEL_PRIVACY:
2079 /* Treat the same for all authenticated rpc requests. */
2080 switch(cli->auth->auth_type) {
2081 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2082 case PIPE_AUTH_TYPE_NTLMSSP:
2083 *p_auth_len = NTLMSSP_SIG_SIZE;
2085 case PIPE_AUTH_TYPE_SCHANNEL:
2086 *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
2089 smb_panic("bad auth type");
2093 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2094 RPC_HDR_AUTH_LEN - *p_auth_len;
2096 data_len = MIN(data_space, data_left);
/* NOTE(review): 8 - (len % 8) padding; presumably guarded by a
 * (data_len % 8) check on an elided line -- confirm in full source. */
2099 *p_ss_padding = 8 - (data_len % 8);
2101 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + /* Normal headers. */
2102 data_len + *p_ss_padding + /* data plus padding. */
2103 RPC_HDR_AUTH_LEN + *p_auth_len; /* Auth header and auth data. */
/* Any other auth level is a caller bug. */
2107 smb_panic("bad auth level");
2113 /*******************************************************************
2115 Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2116 Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2117 and deals with signing/sealing details.
2118 ********************************************************************/
/*
 * State for a full request/response exchange: tracks how much of
 * req_data has been sent, the fragment currently being marshalled
 * (outgoing_frag) and the reassembled reply (reply_pdu).
 */
2120 struct rpc_api_pipe_req_state {
2121 struct event_context *ev;
2122 struct rpc_pipe_client *cli;
2125 prs_struct *req_data;
2126 uint32_t req_data_sent;
2127 prs_struct outgoing_frag;
2128 prs_struct reply_pdu;
/*
 * talloc destructor: free the prs_struct buffers owned by the state.
 */
2131 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2133 prs_mem_free(&s->outgoing_frag);
2134 prs_mem_free(&s->reply_pdu);
2138 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2139 static void rpc_api_pipe_req_done(struct async_req *subreq);
2140 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2141 bool *is_last_frag);
/*
 * Async entry point for an RPC call: splits req_data into fragments
 * that fit max_xmit_frag, signs/seals each one, and sends them.  The
 * last fragment goes through rpc_api_pipe_send() (which also collects
 * the reply); earlier fragments are written with rpc_write_send() and
 * continued from rpc_api_pipe_req_write_done().
 */
2143 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2144 struct event_context *ev,
2145 struct rpc_pipe_client *cli,
2147 prs_struct *req_data)
2149 struct async_req *result, *subreq;
2150 struct rpc_api_pipe_req_state *state;
2154 if (!async_req_setup(mem_ctx, &result, &state,
2155 struct rpc_api_pipe_req_state)) {
2160 state->op_num = op_num;
2161 state->req_data = req_data;
2162 state->req_data_sent = 0;
2163 state->call_id = get_rpc_call_id();
2165 if (cli->max_xmit_frag
2166 < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2167 /* Server is screwed up ! */
2168 status = NT_STATUS_INVALID_PARAMETER;
2172 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2174 if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2176 status = NT_STATUS_NO_MEMORY;
2180 talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2182 status = prepare_next_frag(state, &is_last_frag);
2183 if (!NT_STATUS_IS_OK(status)) {
/* Last (or only) fragment: send it and await the reply. */
2188 subreq = rpc_api_pipe_send(state, ev, state->cli,
2189 &state->outgoing_frag,
2191 if (subreq == NULL) {
2192 status = NT_STATUS_NO_MEMORY;
2195 subreq->async.fn = rpc_api_pipe_req_done;
2196 subreq->async.priv = result;
/* More fragments follow: just write this one out. */
2198 subreq = rpc_write_send(state, ev, cli,
2199 prs_data_p(&state->outgoing_frag),
2200 prs_offset(&state->outgoing_frag));
2201 if (subreq == NULL) {
2202 status = NT_STATUS_NO_MEMORY;
2205 subreq->async.fn = rpc_api_pipe_req_write_done;
2206 subreq->async.priv = result;
2211 if (async_post_status(result, ev, status)) {
2214 TALLOC_FREE(result);
/*
 * Marshall the next request fragment into state->outgoing_frag:
 * RPC header (FIRST/LAST flags as appropriate), RPC_HDR_REQ, the next
 * slice of req_data, zero padding for sign/seal alignment, and the
 * auth footer for the negotiated mechanism.  *is_last_frag reports
 * whether this fragment carries RPC_FLG_LAST.
 */
2218 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2222 RPC_HDR_REQ hdr_req;
2223 uint32_t data_sent_thistime;
2227 uint32_t ss_padding;
2229 char pad[8] = { 0, };
2232 data_left = prs_offset(state->req_data) - state->req_data_sent;
2234 data_sent_thistime = calculate_data_len_tosend(
2235 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2237 if (state->req_data_sent == 0) {
2238 flags = RPC_FLG_FIRST;
2241 if (data_sent_thistime == data_left) {
2242 flags |= RPC_FLG_LAST;
/* Rewind and reuse the same fragment buffer for every fragment. */
2245 if (!prs_set_offset(&state->outgoing_frag, 0)) {
2246 return NT_STATUS_NO_MEMORY;
2249 /* Create and marshall the header and request header. */
2250 init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2253 if (!smb_io_rpc_hdr("hdr ", &hdr, &state->outgoing_frag, 0)) {
2254 return NT_STATUS_NO_MEMORY;
2257 /* Create the rpc request RPC_HDR_REQ */
2258 init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2261 if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2262 &state->outgoing_frag, 0)) {
2263 return NT_STATUS_NO_MEMORY;
2266 /* Copy in the data, plus any ss padding. */
2267 if (!prs_append_some_prs_data(&state->outgoing_frag,
2268 state->req_data, state->req_data_sent,
2269 data_sent_thistime)) {
2270 return NT_STATUS_NO_MEMORY;
2273 /* Copy the sign/seal padding data. */
2274 if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2275 return NT_STATUS_NO_MEMORY;
2278 /* Generate any auth sign/seal and add the auth footer. */
2279 switch (state->cli->auth->auth_type) {
2280 case PIPE_AUTH_TYPE_NONE:
2281 status = NT_STATUS_OK;
2283 case PIPE_AUTH_TYPE_NTLMSSP:
2284 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2285 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2286 &state->outgoing_frag);
2288 case PIPE_AUTH_TYPE_SCHANNEL:
2289 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2290 &state->outgoing_frag);
2293 status = NT_STATUS_INVALID_PARAMETER;
2297 state->req_data_sent += data_sent_thistime;
2298 *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
/*
 * A non-final fragment has been written.  Prepare the next fragment;
 * route the last one through rpc_api_pipe_send() (which reads the
 * reply), and intermediate ones through rpc_write_send() again.
 */
2303 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2305 struct async_req *req = talloc_get_type_abort(
2306 subreq->async.priv, struct async_req);
2307 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2308 req->private_data, struct rpc_api_pipe_req_state);
2312 status = rpc_write_recv(subreq);
2313 TALLOC_FREE(subreq);
2314 if (!NT_STATUS_IS_OK(status)) {
2315 async_req_error(req, status);
2319 status = prepare_next_frag(state, &is_last_frag);
2320 if (!NT_STATUS_IS_OK(status)) {
2321 async_req_error(req, status);
2326 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2327 &state->outgoing_frag,
2329 if (async_req_nomem(subreq, req)) {
2332 subreq->async.fn = rpc_api_pipe_req_done;
2333 subreq->async.priv = req;
2335 subreq = rpc_write_send(state, state->ev, state->cli,
2336 prs_data_p(&state->outgoing_frag),
2337 prs_offset(&state->outgoing_frag));
2338 if (async_req_nomem(subreq, req)) {
2341 subreq->async.fn = rpc_api_pipe_req_write_done;
2342 subreq->async.priv = req;
/*
 * The full reply PDU has been reassembled; stash it in
 * state->reply_pdu and complete the request.
 */
2346 static void rpc_api_pipe_req_done(struct async_req *subreq)
2348 struct async_req *req = talloc_get_type_abort(
2349 subreq->async.priv, struct async_req);
2350 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2351 req->private_data, struct rpc_api_pipe_req_state);
2354 status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2355 TALLOC_FREE(subreq);
2356 if (!NT_STATUS_IS_OK(status)) {
2357 async_req_error(req, status);
2360 async_req_done(req);
/*
 * Hand the reply PDU to the caller (re-parented onto mem_ctx), then
 * re-initialize state->reply_pdu so the state destructor cannot free
 * the buffer we just gave away.
 */
2363 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2364 prs_struct *reply_pdu)
2366 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2367 req->private_data, struct rpc_api_pipe_req_state);
2370 if (async_req_is_error(req, &status)) {
2374 *reply_pdu = state->reply_pdu;
2375 reply_pdu->mem_ctx = mem_ctx;
2378 * Prevent state->req_pdu from being freed in
2379 * rpc_api_pipe_req_state_destructor()
2381 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2383 return NT_STATUS_OK;
/*
 * Synchronous wrapper around rpc_api_pipe_req_send/recv: spins a
 * private event context until the async request completes.  Only safe
 * when no other async requests share this pipe.
 */
2386 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2388 prs_struct *in_data,
2389 prs_struct *out_data)
2391 TALLOC_CTX *frame = talloc_stackframe();
2392 struct event_context *ev;
2393 struct async_req *req;
2394 NTSTATUS status = NT_STATUS_NO_MEMORY;
2396 ev = event_context_init(frame);
2401 req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2406 while (req->state < ASYNC_REQ_DONE) {
2407 event_loop_once(ev);
2410 status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2417 /****************************************************************************
2418 Set the handle state.
2419 ****************************************************************************/
/*
 * Issue a TransactNmPipe-style SetNmPipeHandleState over \PIPE\ to set
 * the named pipe's device/read-mode state. Returns true on success.
 * NOTE(review): 'param' and 'rdata' are declared on lines not shown
 * here — presumably a 2-byte parameter buffer and a return-data
 * pointer; confirm against the full source.
 */
2421 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2422 const char *pipe_name, uint16 device_state)
2424 bool state_set = False;
2426 uint16 setup[2]; /* only need 2 uint16 setup parameters */
2427 char *rparam = NULL;
2429 uint32 rparam_len, rdata_len;
2431 if (pipe_name == NULL)
2434 DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2435 cli->fnum, pipe_name, device_state));
2437 /* create parameters: device state */
2438 SSVAL(param, 0, device_state);
2440 /* create setup parameters. */
2442 setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
2444 /* send the data on \PIPE\ */
2445 if (cli_api_pipe(cli->cli, "\\PIPE\\",
2446 setup, 2, 0, /* setup, length, max */
2447 param, 2, 0, /* param, length, max */
2448 NULL, 0, 1024, /* data, length, max */
2449 &rparam, &rparam_len, /* return param, length */
2450 &rdata, &rdata_len)) /* return data, length */
2452 DEBUG(5, ("Set Handle state: return OK\n"));
2463 /****************************************************************************
2464 Check the rpc bind acknowledge response.
2465 ****************************************************************************/
/*
 * Validate a BIND-ACK: the transfer syntax must match what we asked
 * for, and the ack must carry exactly one successful result. A zero
 * secondary-address length is tolerated (ASU server bug, see below).
 */
2467 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2469 if ( hdr_ba->addr.len == 0) {
2470 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2473 /* check the transfer syntax */
2474 if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2475 (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2476 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
/* Exactly one result, with result code 0 (acceptance), is required. */
2480 if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2481 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2482 hdr_ba->res.num_results, hdr_ba->res.reason));
2485 DEBUG(5,("check_bind_response: accepted!\n"));
2489 /*******************************************************************
2490 Creates a DCE/RPC bind authentication response.
2491 This is the packet that is sent back to the server once we
2492 have received a BIND-ACK, to finish the third leg of
2493 the authentication handshake.
2494 ********************************************************************/
/*
 * Marshall an RPC_AUTH3 PDU into rpc_out: RPC header, 4 bytes of pad,
 * an auth header, then the raw client auth blob (the third leg of the
 * NTLMSSP bind handshake). The marshalling failures below are mapped
 * to NT_STATUS_NO_MEMORY, matching the prs_* buffer-grow semantics.
 */
2496 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2498 enum pipe_auth_type auth_type,
2499 enum pipe_auth_level auth_level,
2500 DATA_BLOB *pauth_blob,
2501 prs_struct *rpc_out)
2504 RPC_HDR_AUTH hdr_auth;
2507 /* Create the request RPC_HDR */
2508 init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2509 RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2510 pauth_blob->length );
2513 if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2514 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2515 return NT_STATUS_NO_MEMORY;
2519 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2520 about padding - shouldn't this pad to length 8 ? JRA.
2523 /* 4 bytes padding. */
2524 if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2525 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2526 return NT_STATUS_NO_MEMORY;
2529 /* Create the request RPC_HDR_AUTHA */
2530 init_rpc_hdr_auth(&hdr_auth,
2531 map_pipe_auth_type_to_rpc_auth_type(auth_type),
2534 if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2535 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2536 return NT_STATUS_NO_MEMORY;
2540 * Append the auth data to the outgoing buffer.
2543 if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2544 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2545 return NT_STATUS_NO_MEMORY;
2548 return NT_STATUS_OK;
2551 /*******************************************************************
2552 Creates a DCE/RPC bind alter context authentication request which
2553 may contain a spnego auth blob
2554 ********************************************************************/
/*
 * Build an ALTER_CONTEXT request PDU carrying an SPNEGO auth blob.
 * The auth header plus blob are marshalled into a temporary prs
 * (auth_info) which create_bind_or_alt_ctx_internal appends to the
 * outgoing PDU; auth_info is freed before returning on all paths.
 */
2556 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2557 const RPC_IFACE *abstract,
2558 const RPC_IFACE *transfer,
2559 enum pipe_auth_level auth_level,
2560 const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2561 prs_struct *rpc_out)
2563 RPC_HDR_AUTH hdr_auth;
2564 prs_struct auth_info;
2565 NTSTATUS ret = NT_STATUS_OK;
2567 ZERO_STRUCT(hdr_auth);
2568 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2569 return NT_STATUS_NO_MEMORY;
2571 /* We may change the pad length before marshalling. */
2572 init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2574 if (pauth_blob->length) {
2575 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2576 prs_mem_free(&auth_info);
2577 return NT_STATUS_NO_MEMORY;
2581 ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2588 prs_mem_free(&auth_info);
2592 /****************************************************************************
2594 ****************************************************************************/
/*
 * Per-request state for the async rpc_pipe_bind_send machinery.
 * NOTE(review): additional members (e.g. the outgoing prs_struct
 * rpc_out freed by the destructor below) are declared on lines not
 * visible here — confirm against the full source.
 */
2596 struct rpc_pipe_bind_state {
2597 struct event_context *ev;	/* event context driving the bind */
2598 struct rpc_pipe_client *cli;	/* pipe being bound */
2600 uint32_t rpc_call_id;	/* call id reused for AUTH3/alter-context legs */
/* talloc destructor: release the marshalled outgoing PDU buffer. */
2603 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2605 prs_mem_free(&state->rpc_out);
2609 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2610 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2611 struct rpc_pipe_bind_state *state,
2612 struct rpc_hdr_info *phdr,
2613 prs_struct *reply_pdu);
2614 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2615 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2616 struct rpc_pipe_bind_state *state,
2617 struct rpc_hdr_info *phdr,
2618 prs_struct *reply_pdu);
2619 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
/*
 * Kick off an async DCE/RPC bind on the pipe: marshall the BIND PDU
 * for the pipe's abstract/transfer syntax and send it, expecting a
 * BIND-ACK. Takes ownership of 'auth' (talloc_move onto cli).
 * Continues in rpc_pipe_bind_step_one_done.
 */
2621 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2622 struct event_context *ev,
2623 struct rpc_pipe_client *cli,
2624 struct cli_pipe_auth_data *auth)
2626 struct async_req *result, *subreq;
2627 struct rpc_pipe_bind_state *state;
2630 if (!async_req_setup(mem_ctx, &result, &state,
2631 struct rpc_pipe_bind_state)) {
2635 DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2636 rpccli_pipe_txt(debug_ctx(), cli),
2637 (unsigned int)auth->auth_type,
2638 (unsigned int)auth->auth_level ));
2642 state->rpc_call_id = get_rpc_call_id();
2644 prs_init_empty(&state->rpc_out, state, MARSHALL);
2645 talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
/* cli takes ownership of the auth data from here on. */
2647 cli->auth = talloc_move(cli, &auth);
2649 /* Marshall the outgoing data. */
2650 status = create_rpc_bind_req(cli, &state->rpc_out,
2652 &cli->abstract_syntax,
2653 &cli->transfer_syntax,
2654 cli->auth->auth_type,
2655 cli->auth->auth_level);
2657 if (!NT_STATUS_IS_OK(status)) {
2661 subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2663 if (subreq == NULL) {
2664 status = NT_STATUS_NO_MEMORY;
2667 subreq->async.fn = rpc_pipe_bind_step_one_done;
2668 subreq->async.priv = result;
/* Error path: deliver the failure status through the event loop. */
2672 if (async_post_status(result, ev, status)) {
2675 TALLOC_FREE(result);
/*
 * Second step of the async bind: parse the BIND-ACK, validate it,
 * record the negotiated fragment sizes, and then — depending on the
 * auth type — either finish (anonymous/schannel), send an AUTH3
 * (NTLMSSP), or start an alter-context exchange (SPNEGO-NTLMSSP).
 */
2679 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2681 struct async_req *req = talloc_get_type_abort(
2682 subreq->async.priv, struct async_req);
2683 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2684 req->private_data, struct rpc_pipe_bind_state);
2685 prs_struct reply_pdu;
2686 struct rpc_hdr_info hdr;
2687 struct rpc_hdr_ba_info hdr_ba;
2690 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2691 TALLOC_FREE(subreq);
2692 if (!NT_STATUS_IS_OK(status)) {
2693 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2694 rpccli_pipe_txt(debug_ctx(), state->cli),
2695 nt_errstr(status)));
2696 async_req_error(req, status);
2700 /* Unmarshall the RPC header */
2701 if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2702 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2703 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2707 if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2708 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2710 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2714 if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2715 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2716 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
/* Remember the fragment sizes the server negotiated in the BIND-ACK. */
2720 state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2721 state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2724 * For authenticated binds we may need to do 3 or 4 leg binds.
2727 switch(state->cli->auth->auth_type) {
2729 case PIPE_AUTH_TYPE_NONE:
2730 case PIPE_AUTH_TYPE_SCHANNEL:
2731 /* Bind complete. */
2732 async_req_done(req);
2735 case PIPE_AUTH_TYPE_NTLMSSP:
2736 /* Need to send AUTH3 packet - no reply. */
2737 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2739 if (!NT_STATUS_IS_OK(status)) {
2740 async_req_error(req, status);
2744 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2745 /* Need to send alter context request and reply. */
2746 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2748 if (!NT_STATUS_IS_OK(status)) {
2749 async_req_error(req, status);
2753 case PIPE_AUTH_TYPE_KRB5:
2757 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2758 (unsigned int)state->cli->auth->auth_type));
2759 async_req_error(req, NT_STATUS_INTERNAL_ERROR);
/*
 * Third leg of an NTLMSSP bind: extract the server's challenge blob
 * from the tail of the BIND-ACK, run it through ntlmssp_update to get
 * the client AUTHENTICATE blob, marshall it as an RPC_AUTH3 PDU and
 * write it out. Completion continues in rpc_bind_auth3_write_done.
 */
2763 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2764 struct rpc_pipe_bind_state *state,
2765 struct rpc_hdr_info *phdr,
2766 prs_struct *reply_pdu)
2768 DATA_BLOB server_response = data_blob_null;
2769 DATA_BLOB client_reply = data_blob_null;
2770 struct rpc_hdr_auth_info hdr_auth;
2771 struct async_req *subreq;
/* Sanity: the PDU must actually contain trailing auth data. */
2774 if ((phdr->auth_len == 0)
2775 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2776 return NT_STATUS_INVALID_PARAMETER;
/* Seek to the auth header at the end of the fragment. */
2779 if (!prs_set_offset(
2781 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2782 return NT_STATUS_INVALID_PARAMETER;
2785 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2786 return NT_STATUS_INVALID_PARAMETER;
2789 /* TODO - check auth_type/auth_level match. */
2791 server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2792 prs_copy_data_out((char *)server_response.data, reply_pdu,
2795 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2796 server_response, &client_reply);
2798 if (!NT_STATUS_IS_OK(status)) {
2799 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2800 "blob failed: %s.\n", nt_errstr(status)));
/* Reuse state->rpc_out for the AUTH3 PDU. */
2804 prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2806 status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2807 state->cli->auth->auth_type,
2808 state->cli->auth->auth_level,
2809 &client_reply, &state->rpc_out);
2810 data_blob_free(&client_reply);
2812 if (!NT_STATUS_IS_OK(status)) {
2816 subreq = rpc_write_send(state, state->ev, state->cli,
2817 prs_data_p(&state->rpc_out),
2818 prs_offset(&state->rpc_out));
2819 if (subreq == NULL) {
2820 return NT_STATUS_NO_MEMORY;
2822 subreq->async.fn = rpc_bind_auth3_write_done;
2823 subreq->async.priv = req;
2824 return NT_STATUS_OK;
/*
 * Completion of the AUTH3 write: there is no reply PDU for AUTH3, so
 * a successful write finishes the whole bind.
 */
2827 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2829 struct async_req *req = talloc_get_type_abort(
2830 subreq->async.priv, struct async_req);
2833 status = rpc_write_recv(subreq);
2834 TALLOC_FREE(subreq);
2835 if (!NT_STATUS_IS_OK(status)) {
2836 async_req_error(req, status);
2839 async_req_done(req);
/*
 * Fourth-leg setup for SPNEGO-wrapped NTLMSSP: unwrap the SPNEGO
 * challenge from the BIND-ACK, feed the inner NTLMSSP challenge to
 * ntlmssp_update, SPNEGO-wrap the resulting client blob, and send it
 * in an ALTER_CONTEXT request. Completion continues in
 * rpc_bind_ntlmssp_api_done.
 */
2842 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2843 struct rpc_pipe_bind_state *state,
2844 struct rpc_hdr_info *phdr,
2845 prs_struct *reply_pdu)
2847 DATA_BLOB server_spnego_response = data_blob_null;
2848 DATA_BLOB server_ntlm_response = data_blob_null;
2849 DATA_BLOB client_reply = data_blob_null;
2850 DATA_BLOB tmp_blob = data_blob_null;
2851 RPC_HDR_AUTH hdr_auth;
2852 struct async_req *subreq;
/* Sanity: the PDU must carry trailing auth data. */
2855 if ((phdr->auth_len == 0)
2856 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2857 return NT_STATUS_INVALID_PARAMETER;
2860 /* Process the returned NTLMSSP blob first. */
2861 if (!prs_set_offset(
2863 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2864 return NT_STATUS_INVALID_PARAMETER;
2867 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2868 return NT_STATUS_INVALID_PARAMETER;
2871 server_spnego_response = data_blob(NULL, phdr->auth_len);
2872 prs_copy_data_out((char *)server_spnego_response.data,
2873 reply_pdu, phdr->auth_len);
2876 * The server might give us back two challenges - tmp_blob is for the
2879 if (!spnego_parse_challenge(server_spnego_response,
2880 &server_ntlm_response, &tmp_blob)) {
2881 data_blob_free(&server_spnego_response);
2882 data_blob_free(&server_ntlm_response);
2883 data_blob_free(&tmp_blob);
2884 return NT_STATUS_INVALID_PARAMETER;
2887 /* We're finished with the server spnego response and the tmp_blob. */
2888 data_blob_free(&server_spnego_response);
2889 data_blob_free(&tmp_blob);
2891 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2892 server_ntlm_response, &client_reply);
2894 /* Finished with the server_ntlm response */
2895 data_blob_free(&server_ntlm_response);
2897 if (!NT_STATUS_IS_OK(status)) {
2898 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2899 "using server blob failed.\n"));
2900 data_blob_free(&client_reply);
2904 /* SPNEGO wrap the client reply. */
2905 tmp_blob = spnego_gen_auth(client_reply);
2906 data_blob_free(&client_reply);
2907 client_reply = tmp_blob;
2908 tmp_blob = data_blob_null;
2910 /* Now prepare the alter context pdu. */
2911 prs_init_empty(&state->rpc_out, state, MARSHALL);
2913 status = create_rpc_alter_context(state->rpc_call_id,
2914 &state->cli->abstract_syntax,
2915 &state->cli->transfer_syntax,
2916 state->cli->auth->auth_level,
2919 data_blob_free(&client_reply);
2921 if (!NT_STATUS_IS_OK(status)) {
/* Send the alter-context and expect an ALTCONTRESP reply. */
2925 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2926 &state->rpc_out, RPC_ALTCONTRESP);
2927 if (subreq == NULL) {
2928 return NT_STATUS_NO_MEMORY;
2930 subreq->async.fn = rpc_bind_ntlmssp_api_done;
2931 subreq->async.priv = req;
2932 return NT_STATUS_OK;
/*
 * Final leg of the SPNEGO-NTLMSSP bind: parse the alter-context
 * response, pull the SPNEGO blob from its auth trailer and verify it
 * is a valid negTokenTarg acceptance for NTLMSSP. On success the bind
 * is complete.
 */
2935 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2937 struct async_req *req = talloc_get_type_abort(
2938 subreq->async.priv, struct async_req);
2939 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2940 req->private_data, struct rpc_pipe_bind_state);
2941 DATA_BLOB server_spnego_response = data_blob_null;
2942 DATA_BLOB tmp_blob = data_blob_null;
2943 prs_struct reply_pdu;
2944 struct rpc_hdr_info hdr;
2945 struct rpc_hdr_auth_info hdr_auth;
2948 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2949 TALLOC_FREE(subreq);
2950 if (!NT_STATUS_IS_OK(status)) {
2951 async_req_error(req, status);
2955 /* Get the auth blob from the reply. */
2956 if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
2957 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2958 "unmarshall RPC_HDR.\n"));
2959 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
/* Seek to the auth trailer at the end of the fragment. */
2963 if (!prs_set_offset(
2965 hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2966 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2970 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2971 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2975 server_spnego_response = data_blob(NULL, hdr.auth_len);
2976 prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2979 /* Check we got a valid auth response. */
2980 if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2981 OID_NTLMSSP, &tmp_blob)) {
2982 data_blob_free(&server_spnego_response);
2983 data_blob_free(&tmp_blob);
2984 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2988 data_blob_free(&server_spnego_response);
2989 data_blob_free(&tmp_blob);
2991 DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2992 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2993 async_req_done(req);
/* Receive side of rpc_pipe_bind_send: just reports the final status. */
2996 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2998 return async_req_simple_recv(req);
/*
 * Synchronous wrapper around rpc_pipe_bind_send/_recv: drives a
 * private event context until the async bind completes.
 */
3001 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
3002 struct cli_pipe_auth_data *auth)
3004 TALLOC_CTX *frame = talloc_stackframe();
3005 struct event_context *ev;
3006 struct async_req *req;
3007 NTSTATUS status = NT_STATUS_NO_MEMORY;
3009 ev = event_context_init(frame);
3014 req = rpc_pipe_bind_send(frame, ev, cli, auth);
/* Pump the event loop until the bind has finished. */
3019 while (req->state < ASYNC_REQ_DONE) {
3020 event_loop_once(ev);
3023 status = rpc_pipe_bind_recv(req);
/*
 * Set the SMB timeout on the underlying named-pipe connection;
 * returns the previous timeout (per cli_set_timeout semantics).
 * NOTE(review): assumes a named-pipe (NCACN_NP) transport — no
 * transport check is visible here.
 */
3029 unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli,
3030 unsigned int timeout)
3032 return cli_set_timeout(cli->trans.np.cli, timeout);
/*
 * Fetch the NT password hash associated with this connection: from
 * the NTLMSSP auth state if NTLMSSP was used, otherwise (over a named
 * pipe) derived from the SMB connection's plaintext password.
 */
3035 bool rpccli_get_pwd_hash(struct rpc_pipe_client *cli, uint8_t nt_hash[16])
3037 if ((cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
3038 || (cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
3039 memcpy(nt_hash, cli->auth->a_u.ntlmssp_state->nt_hash, 16);
3043 if (cli->transport_type == NCACN_NP) {
3044 E_md4hash(cli->trans.np.cli->pwd.password, nt_hash);
/*
 * Return the SMB connection backing a named-pipe RPC client, or NULL
 * (implicitly, on the elided path) for non-NP transports.
 */
3051 struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
3053 if (p->transport_type == NCACN_NP) {
3054 return p->trans.np.cli;
/*
 * talloc destructor for named-pipe RPC clients: closes the pipe fnum
 * on the SMB connection and unlinks the client from the connection's
 * pipe list. Returns -1 (veto free) if the close failed.
 */
3059 static int rpc_pipe_destructor(struct rpc_pipe_client *p)
3061 if (p->transport_type == NCACN_NP) {
3063 ret = cli_close(p->trans.np.cli, p->trans.np.fnum);
3065 DEBUG(1, ("rpc_pipe_destructor: cli_close failed on "
3066 "pipe %s. Error was %s\n",
3067 rpccli_pipe_txt(debug_ctx(), p),
3068 cli_errstr(p->trans.np.cli)));
3071 DEBUG(10, ("rpc_pipe_destructor: closed %s\n",
3072 rpccli_pipe_txt(debug_ctx(), p)));
3074 DLIST_REMOVE(p->trans.np.cli->pipe_list, p);
3075 return ret ? -1 : 0;
/*
 * Allocate cli_pipe_auth_data for an anonymous (no-auth) bind:
 * auth type/level NONE with empty user and domain strings.
 */
3081 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
3082 struct cli_pipe_auth_data **presult)
3084 struct cli_pipe_auth_data *result;
3086 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3087 if (result == NULL) {
3088 return NT_STATUS_NO_MEMORY;
3091 result->auth_type = PIPE_AUTH_TYPE_NONE;
3092 result->auth_level = PIPE_AUTH_LEVEL_NONE;
3094 result->user_name = talloc_strdup(result, "");
3095 result->domain = talloc_strdup(result, "");
3096 if ((result->user_name == NULL) || (result->domain == NULL)) {
3097 TALLOC_FREE(result);
3098 return NT_STATUS_NO_MEMORY;
3102 return NT_STATUS_OK;
/* talloc destructor: tear down the embedded NTLMSSP client state. */
3105 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3107 ntlmssp_end(&auth->a_u.ntlmssp_state);
/*
 * Allocate cli_pipe_auth_data for an NTLMSSP (or SPNEGO-NTLMSSP) bind:
 * starts an NTLMSSP client context, seeds it with username/domain/
 * password, and sets the SIGN/SEAL negotiate flags from auth_level
 * (INTEGRITY -> sign, PRIVACY -> sign+seal).
 */
3111 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3112 enum pipe_auth_type auth_type,
3113 enum pipe_auth_level auth_level,
3115 const char *username,
3116 const char *password,
3117 struct cli_pipe_auth_data **presult)
3119 struct cli_pipe_auth_data *result;
3122 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3123 if (result == NULL) {
3124 return NT_STATUS_NO_MEMORY;
3127 result->auth_type = auth_type;
3128 result->auth_level = auth_level;
3130 result->user_name = talloc_strdup(result, username);
3131 result->domain = talloc_strdup(result, domain);
3132 if ((result->user_name == NULL) || (result->domain == NULL)) {
3133 status = NT_STATUS_NO_MEMORY;
3137 status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3138 if (!NT_STATUS_IS_OK(status)) {
/* From here on, freeing 'result' also ends the NTLMSSP context. */
3142 talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3144 status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3145 if (!NT_STATUS_IS_OK(status)) {
3149 status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3150 if (!NT_STATUS_IS_OK(status)) {
3154 status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3155 if (!NT_STATUS_IS_OK(status)) {
3160 * Turn off sign+seal to allow selected auth level to turn it back on.
3162 result->a_u.ntlmssp_state->neg_flags &=
3163 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3165 if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3166 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3167 } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3168 result->a_u.ntlmssp_state->neg_flags
3169 |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3173 return NT_STATUS_OK;
3176 TALLOC_FREE(result);
/*
 * Allocate cli_pipe_auth_data for a schannel bind: copies the 16-byte
 * session key into a fresh schannel_auth_struct and resets the
 * sequence number.
 */
3180 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3181 enum pipe_auth_level auth_level,
3182 const uint8_t sess_key[16],
3183 struct cli_pipe_auth_data **presult)
3185 struct cli_pipe_auth_data *result;
3187 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3188 if (result == NULL) {
3189 return NT_STATUS_NO_MEMORY;
3192 result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3193 result->auth_level = auth_level;
3195 result->user_name = talloc_strdup(result, "");
3196 result->domain = talloc_strdup(result, domain);
3197 if ((result->user_name == NULL) || (result->domain == NULL)) {
3201 result->a_u.schannel_auth = talloc(result,
3202 struct schannel_auth_struct);
3203 if (result->a_u.schannel_auth == NULL) {
3207 memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3208 sizeof(result->a_u.schannel_auth->sess_key));
3209 result->a_u.schannel_auth->seq_num = 0;
3212 return NT_STATUS_OK;
3215 TALLOC_FREE(result);
3216 return NT_STATUS_NO_MEMORY;
/* talloc destructor: free the Kerberos session key blob. */
3220 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3222 data_blob_free(&auth->session_key);
/*
 * Allocate cli_pipe_auth_data for a Kerberos (KRB5) bind. When a
 * username and password are given, a kinit is performed up front to
 * obtain a TGT. The non-Kerberos build path (elided #else) returns
 * NT_STATUS_NOT_SUPPORTED.
 */
3227 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3228 enum pipe_auth_level auth_level,
3229 const char *service_princ,
3230 const char *username,
3231 const char *password,
3232 struct cli_pipe_auth_data **presult)
3235 struct cli_pipe_auth_data *result;
/* Get a fresh TGT if explicit credentials were supplied. */
3237 if ((username != NULL) && (password != NULL)) {
3238 int ret = kerberos_kinit_password(username, password, 0, NULL);
3240 return NT_STATUS_ACCESS_DENIED;
3244 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3245 if (result == NULL) {
3246 return NT_STATUS_NO_MEMORY;
3249 result->auth_type = PIPE_AUTH_TYPE_KRB5;
3250 result->auth_level = auth_level;
3253 * Username / domain need fixing!
3255 result->user_name = talloc_strdup(result, "");
3256 result->domain = talloc_strdup(result, "");
3257 if ((result->user_name == NULL) || (result->domain == NULL)) {
3261 result->a_u.kerberos_auth = TALLOC_ZERO_P(
3262 result, struct kerberos_auth_struct);
3263 if (result->a_u.kerberos_auth == NULL) {
3266 talloc_set_destructor(result->a_u.kerberos_auth,
3267 cli_auth_kerberos_data_destructor);
3269 result->a_u.kerberos_auth->service_principal = talloc_strdup(
3270 result, service_princ);
3271 if (result->a_u.kerberos_auth->service_principal == NULL) {
3276 return NT_STATUS_OK;
3279 TALLOC_FREE(result);
3280 return NT_STATUS_NO_MEMORY;
3282 return NT_STATUS_NOT_SUPPORTED;
/* talloc destructor: close the socket fd of a socket-based pipe. */
3286 static int rpc_pipe_sock_destructor(struct rpc_pipe_client *p)
3288 close(p->trans.sock.fd);
3293 * Create an rpc pipe client struct, connecting to a tcp port.
/*
 * Create an NCACN_IP_TCP rpc_pipe_client connected to host:port.
 * Resolves the host, opens the socket (60s timeout) and installs a
 * destructor that closes the fd when the client is freed.
 */
3295 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3297 const struct ndr_syntax_id *abstract_syntax,
3298 struct rpc_pipe_client **presult)
3300 struct rpc_pipe_client *result;
3301 struct sockaddr_storage addr;
3304 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3305 if (result == NULL) {
3306 return NT_STATUS_NO_MEMORY;
3309 result->transport_type = NCACN_IP_TCP;
3311 result->abstract_syntax = *abstract_syntax;
3312 result->transfer_syntax = ndr_transfer_syntax;
3314 result->desthost = talloc_strdup(result, host);
3315 result->srv_name_slash = talloc_asprintf_strupper_m(
3316 result, "\\\\%s", result->desthost);
3317 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3318 status = NT_STATUS_NO_MEMORY;
3322 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3323 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3325 if (!resolve_name(host, &addr, 0)) {
3326 status = NT_STATUS_NOT_FOUND;
3330 status = open_socket_out(&addr, port, 60, &result->trans.sock.fd);
3331 if (!NT_STATUS_IS_OK(status)) {
/* The destructor closes the fd once the talloc chunk is freed. */
3335 talloc_set_destructor(result, rpc_pipe_sock_destructor);
3338 return NT_STATUS_OK;
3341 TALLOC_FREE(result);
3346 * Determine the tcp port on which a dcerpc interface is listening
3347 * for the ncacn_ip_tcp transport via the endpoint mapper of the
/*
 * Ask the endpoint mapper on 'host' (TCP port 135) for the port the
 * given interface listens on over ncacn_ip_tcp: connect + anonymous
 * bind to epmapper, build a request tower for the interface, call
 * epm_Map, and extract the port from the single returned tower.
 * All intermediate allocations hang off a local stackframe.
 */
3350 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3351 const struct ndr_syntax_id *abstract_syntax,
3355 struct rpc_pipe_client *epm_pipe = NULL;
3356 struct cli_pipe_auth_data *auth = NULL;
3357 struct dcerpc_binding *map_binding = NULL;
3358 struct dcerpc_binding *res_binding = NULL;
3359 struct epm_twr_t *map_tower = NULL;
3360 struct epm_twr_t *res_towers = NULL;
3361 struct policy_handle *entry_handle = NULL;
3362 uint32_t num_towers = 0;
3363 uint32_t max_towers = 1;
3364 struct epm_twr_p_t towers;
3365 TALLOC_CTX *tmp_ctx = talloc_stackframe();
3367 if (pport == NULL) {
3368 status = NT_STATUS_INVALID_PARAMETER;
3372 /* open the connection to the endpoint mapper */
3373 status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3374 &ndr_table_epmapper.syntax_id,
3377 if (!NT_STATUS_IS_OK(status)) {
3381 status = rpccli_anon_bind_data(tmp_ctx, &auth);
3382 if (!NT_STATUS_IS_OK(status)) {
3386 status = rpc_pipe_bind(epm_pipe, auth);
3387 if (!NT_STATUS_IS_OK(status)) {
3391 /* create tower for asking the epmapper */
3393 map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3394 if (map_binding == NULL) {
3395 status = NT_STATUS_NO_MEMORY;
3399 map_binding->transport = NCACN_IP_TCP;
3400 map_binding->object = *abstract_syntax;
3401 map_binding->host = host; /* needed? */
3402 map_binding->endpoint = "0"; /* correct? needed? */
3404 map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3405 if (map_tower == NULL) {
3406 status = NT_STATUS_NO_MEMORY;
3410 status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3411 &(map_tower->tower));
3412 if (!NT_STATUS_IS_OK(status)) {
3416 /* allocate further parameters for the epm_Map call */
3418 res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3419 if (res_towers == NULL) {
3420 status = NT_STATUS_NO_MEMORY;
3423 towers.twr = res_towers;
3425 entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3426 if (entry_handle == NULL) {
3427 status = NT_STATUS_NO_MEMORY;
3431 /* ask the endpoint mapper for the port */
3433 status = rpccli_epm_Map(epm_pipe,
3435 CONST_DISCARD(struct GUID *,
3436 &(abstract_syntax->uuid)),
3443 if (!NT_STATUS_IS_OK(status)) {
/* We asked for at most one tower; anything else is a failure. */
3447 if (num_towers != 1) {
3448 status = NT_STATUS_UNSUCCESSFUL;
3452 /* extract the port from the answer */
3454 status = dcerpc_binding_from_tower(tmp_ctx,
3455 &(towers.twr->tower),
3457 if (!NT_STATUS_IS_OK(status)) {
3461 /* are further checks here necessary? */
3462 if (res_binding->transport != NCACN_IP_TCP) {
3463 status = NT_STATUS_UNSUCCESSFUL;
3467 *pport = (uint16_t)atoi(res_binding->endpoint);
3470 TALLOC_FREE(tmp_ctx);
3475 * Create a rpc pipe client struct, connecting to a host via tcp.
3476 * The port is determined by asking the endpoint mapper on the given
/*
 * Create an NCACN_IP_TCP rpc_pipe_client for the given interface on
 * 'host', discovering the port via the remote endpoint mapper first.
 */
3479 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3480 const struct ndr_syntax_id *abstract_syntax,
3481 struct rpc_pipe_client **presult)
3488 status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3489 if (!NT_STATUS_IS_OK(status)) {
3493 status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3494 abstract_syntax, presult);
3500 /********************************************************************
3501 Create a rpc pipe client struct, connecting to a unix domain socket
3502 ********************************************************************/
/*
 * Create an NCACN_UNIX_STREAM rpc_pipe_client connected to a local
 * unix domain socket at socket_path. The fd is owned by the returned
 * client via rpc_pipe_sock_destructor.
 * NOTE(review): strncpy on addr.sun_path may leave the path
 * unterminated if socket_path fills the buffer — flagged for review.
 */
3503 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3504 const struct ndr_syntax_id *abstract_syntax,
3505 struct rpc_pipe_client **presult)
3507 struct rpc_pipe_client *result;
3508 struct sockaddr_un addr;
3511 result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3512 if (result == NULL) {
3513 return NT_STATUS_NO_MEMORY;
3516 result->transport_type = NCACN_UNIX_STREAM;
3518 result->abstract_syntax = *abstract_syntax;
3519 result->transfer_syntax = ndr_transfer_syntax;
3521 result->desthost = talloc_get_myname(result);
3522 result->srv_name_slash = talloc_asprintf_strupper_m(
3523 result, "\\\\%s", result->desthost);
3524 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3525 status = NT_STATUS_NO_MEMORY;
3529 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3530 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3532 result->trans.sock.fd = socket(AF_UNIX, SOCK_STREAM, 0);
3533 if (result->trans.sock.fd == -1) {
3534 status = map_nt_error_from_unix(errno);
3538 talloc_set_destructor(result, rpc_pipe_sock_destructor);
3541 addr.sun_family = AF_UNIX;
3542 strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3544 if (sys_connect(result->trans.sock.fd,
3545 (struct sockaddr *)&addr) == -1) {
3546 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3548 close(result->trans.sock.fd);
3549 return map_nt_error_from_unix(errno);
3553 return NT_STATUS_OK;
3556 TALLOC_FREE(result);
3561 /****************************************************************************
3562 Open a named pipe over SMB to a remote server.
3564 * CAVEAT CALLER OF THIS FUNCTION:
3565 * The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3566 * so be sure that this function is called AFTER any structure (vs pointer)
3567 * assignment of the cli. In particular, libsmbclient does structure
3568 * assignments of cli, which invalidates the data in the returned
3569 * rpc_pipe_client if this function is called before the structure assignment
3572 ****************************************************************************/
/*
 * Open a named pipe over an existing SMB connection for the given
 * interface: maps the interface to its \PIPE\ name, does the
 * NtCreate open, links the client into cli->pipe_list and installs
 * rpc_pipe_destructor to close/unlink on free. See the caveat in the
 * comment above about structure-assignment of 'cli'.
 */
3574 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3575 const struct ndr_syntax_id *abstract_syntax,
3576 struct rpc_pipe_client **presult)
3578 struct rpc_pipe_client *result;
3581 /* sanity check to protect against crashes */
3584 return NT_STATUS_INVALID_HANDLE;
3587 result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3588 if (result == NULL) {
3589 return NT_STATUS_NO_MEMORY;
3592 result->transport_type = NCACN_NP;
3594 result->trans.np.pipe_name = cli_get_pipe_name_from_iface(
3595 result, cli, abstract_syntax);
3596 if (result->trans.np.pipe_name == NULL) {
3597 DEBUG(1, ("Could not find pipe for interface\n"));
3598 TALLOC_FREE(result);
3599 return NT_STATUS_INVALID_PARAMETER;
3602 result->trans.np.cli = cli;
3603 result->abstract_syntax = *abstract_syntax;
3604 result->transfer_syntax = ndr_transfer_syntax;
3605 result->desthost = talloc_strdup(result, cli->desthost);
3606 result->srv_name_slash = talloc_asprintf_strupper_m(
3607 result, "\\\\%s", result->desthost);
3609 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3610 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3612 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3613 TALLOC_FREE(result);
3614 return NT_STATUS_NO_MEMORY;
3617 fnum = cli_nt_create(cli, result->trans.np.pipe_name,
3618 DESIRED_ACCESS_PIPE);
3620 DEBUG(3,("rpc_pipe_open_np: cli_nt_create failed on pipe %s "
3621 "to machine %s. Error was %s\n",
3622 result->trans.np.pipe_name, cli->desthost,
3624 TALLOC_FREE(result);
3625 return cli_get_nt_error(cli);
3628 result->trans.np.fnum = fnum;
/* Track this pipe on the SMB connection; destructor unlinks it. */
3630 DLIST_ADD(cli->pipe_list, result);
3631 talloc_set_destructor(result, rpc_pipe_destructor);
3634 return NT_STATUS_OK;
3637 /****************************************************************************
3638 Open a pipe to a remote server.
3639 ****************************************************************************/
/*
 * Transport dispatch for opening an RPC pipe: drsuapi is special-cased
 * onto ncacn_ip_tcp (see in-line note), everything else goes over the
 * named-pipe transport on the existing SMB connection.
 */
3641 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3642 const struct ndr_syntax_id *interface,
3643 struct rpc_pipe_client **presult)
3645 if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3647 * We should have a better way to figure out this drsuapi
3650 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3654 return rpc_pipe_open_np(cli, interface, presult);
3657 /****************************************************************************
3658 Open a named pipe to an SMB server and bind anonymously.
3659 ****************************************************************************/
/*
 * Open "interface" over the given SMB connection and perform an
 * anonymous (unauthenticated) DCE/RPC bind on it.
 *
 * On success *presult owns the bound pipe; on every visible error path
 * the half-constructed pipe is talloc-freed before returning.
 */
3661 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3662 const struct ndr_syntax_id *interface,
3663 struct rpc_pipe_client **presult)
3665 struct rpc_pipe_client *result;
3666 struct cli_pipe_auth_data *auth;
3669 status = cli_rpc_pipe_open(cli, interface, &result);
3670 if (!NT_STATUS_IS_OK(status)) {
/* Build the auth_data blob for an anonymous bind. */
3674 status = rpccli_anon_bind_data(result, &auth);
3675 if (!NT_STATUS_IS_OK(status)) {
3676 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3677 nt_errstr(status)));
3678 TALLOC_FREE(result);
3683 * This is a bit of an abstraction violation due to the fact that an
3684 * anonymous bind on an authenticated SMB inherits the user/domain
3685 * from the enclosing SMB creds
/* Drop whatever rpccli_anon_bind_data() filled in, then copy the
 * user/domain/session key from the SMB connection instead. */
3688 TALLOC_FREE(auth->user_name);
3689 TALLOC_FREE(auth->domain);
3691 auth->user_name = talloc_strdup(auth, cli->user_name);
3692 auth->domain = talloc_strdup(auth, cli->domain);
3693 auth->user_session_key = data_blob_talloc(auth,
3694 cli->user_session_key.data,
3695 cli->user_session_key.length);
/* NOTE(review): only user_name/domain are NULL-checked below; the
 * user_session_key blob is not — presumably a zero-length key is
 * acceptable here, but confirm. */
3697 if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3698 TALLOC_FREE(result);
3699 return NT_STATUS_NO_MEMORY;
3702 status = rpc_pipe_bind(result, auth);
3703 if (!NT_STATUS_IS_OK(status)) {
3705 if (ndr_syntax_id_equal(interface,
3706 &ndr_table_dssetup.syntax_id)) {
3707 /* non AD domains just don't have this pipe, avoid
3708 * level 0 statement in that case - gd */
/* lvl: debug level chosen above — lowered for the dssetup case so a
 * routine miss on non-AD domains does not log at level 0. */
3711 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3712 "%s failed with error %s\n",
3713 cli_get_pipe_name_from_iface(debug_ctx(), cli,
3715 nt_errstr(status) ));
3716 TALLOC_FREE(result);
3720 DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3721 "%s and bound anonymously.\n", result->trans.np.pipe_name,
3725 return NT_STATUS_OK;
3728 /****************************************************************************
3729 Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3730 ****************************************************************************/
/*
 * Common worker for cli_rpc_pipe_open_ntlmssp() and
 * cli_rpc_pipe_open_spnego_ntlmssp(): open the pipe, build NTLMSSP
 * bind data of the requested auth_type/auth_level, and bind.
 *
 * auth_type  - PIPE_AUTH_TYPE_NTLMSSP or PIPE_AUTH_TYPE_SPNEGO_NTLMSSP.
 * auth_level - integrity (sign) or privacy (seal) level for the bind.
 * presult    - receives the bound pipe on success.
 */
3732 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3733 const struct ndr_syntax_id *interface,
3734 enum pipe_auth_type auth_type,
3735 enum pipe_auth_level auth_level,
3737 const char *username,
3738 const char *password,
3739 struct rpc_pipe_client **presult)
3741 struct rpc_pipe_client *result;
3742 struct cli_pipe_auth_data *auth;
3745 status = cli_rpc_pipe_open(cli, interface, &result);
3746 if (!NT_STATUS_IS_OK(status)) {
/* If the SMB connection was set up with a null password, pass NULL to
 * NTLMSSP rather than the (meaningless) password string. */
3750 status = rpccli_ntlmssp_bind_data(
3751 result, auth_type, auth_level, domain, username,
3752 cli->pwd.null_pwd ? NULL : password, &auth);
3753 if (!NT_STATUS_IS_OK(status)) {
3754 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3755 nt_errstr(status)));
3759 status = rpc_pipe_bind(result, auth);
3760 if (!NT_STATUS_IS_OK(status)) {
3761 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3762 nt_errstr(status) ));
3766 DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3767 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3768 result->trans.np.pipe_name, cli->desthost,
3769 domain, username ));
3772 return NT_STATUS_OK;
/* Shared failure exit: tear down the half-constructed pipe before
 * returning the error status (label elided in this view — confirm). */
3776 TALLOC_FREE(result);
3780 /****************************************************************************
3782 Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3783 ****************************************************************************/
/*
 * Thin wrapper: raw NTLMSSP bind (no SPNEGO wrapping).  All work is
 * delegated to cli_rpc_pipe_open_ntlmssp_internal() with
 * PIPE_AUTH_TYPE_NTLMSSP.
 */
3785 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3786 const struct ndr_syntax_id *interface,
3787 enum pipe_auth_level auth_level,
3789 const char *username,
3790 const char *password,
3791 struct rpc_pipe_client **presult)
3793 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3795 PIPE_AUTH_TYPE_NTLMSSP,
3803 /****************************************************************************
3805 Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3806 ****************************************************************************/
/*
 * Thin wrapper: NTLMSSP negotiated inside SPNEGO.  All work is
 * delegated to cli_rpc_pipe_open_ntlmssp_internal() with
 * PIPE_AUTH_TYPE_SPNEGO_NTLMSSP.
 */
3808 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3809 const struct ndr_syntax_id *interface,
3810 enum pipe_auth_level auth_level,
3812 const char *username,
3813 const char *password,
3814 struct rpc_pipe_client **presult)
3816 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3818 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3826 /****************************************************************************
3827  Get the schannel session key out of an already opened netlogon pipe.
3828 ****************************************************************************/
/*
 * Establish netlogon credentials over an existing netlogon pipe using
 * the machine account password from secrets.tdb, then verify that the
 * server actually negotiated schannel (NETLOGON_NEG_SCHANNEL).
 *
 * Returns NT_STATUS_CANT_ACCESS_DOMAIN_INFO when no trust password is
 * stored for the domain, NT_STATUS_INVALID_NETWORK_RESPONSE when the
 * server declined schannel.
 */
3829 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3830 struct cli_state *cli,
3834 uint32 sec_chan_type = 0;
3835 unsigned char machine_pwd[16];
3836 const char *machine_account;
3839 /* Get the machine account credentials from secrets.tdb. */
3840 if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3843 DEBUG(0, ("get_schannel_session_key: could not fetch "
3844 "trust account password for domain '%s'\n",
3846 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
/* Run the netlogon challenge/response (auth2) to set up credentials;
 * on success the negotiated flags land in *pneg_flags. */
3849 status = rpccli_netlogon_setup_creds(netlogon_pipe,
3850 cli->desthost, /* server name */
3851 domain, /* domain */
3852 global_myname(), /* client name */
3853 machine_account, /* machine account name */
3858 if (!NT_STATUS_IS_OK(status)) {
3859 DEBUG(3, ("get_schannel_session_key_common: "
3860 "rpccli_netlogon_setup_creds failed with result %s "
3861 "to server %s, domain %s, machine account %s.\n",
3862 nt_errstr(status), cli->desthost, domain,
/* Creds were set up, but the server must also have offered schannel. */
3867 if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3868 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3870 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3873 return NT_STATUS_OK;; /* NOTE(review): stray double semicolon — harmless, but should be removed. */
3876 /****************************************************************************
3877 Open a netlogon pipe and get the schannel session key.
3878 Now exposed to external callers.
3879 ****************************************************************************/
/*
 * Convenience wrapper: open an anonymous netlogon pipe, establish
 * schannel credentials on it via get_schannel_session_key_common(),
 * and hand the still-open netlogon pipe back to the caller in
 * *presult (caller owns it and must TALLOC_FREE it when done).
 */
3882 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3885 struct rpc_pipe_client **presult)
3887 struct rpc_pipe_client *netlogon_pipe = NULL;
3890 status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3892 if (!NT_STATUS_IS_OK(status)) {
3896 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3898 if (!NT_STATUS_IS_OK(status)) {
3899 TALLOC_FREE(netlogon_pipe);
/* Success: transfer ownership of the credentialed netlogon pipe. */
3903 *presult = netlogon_pipe;
3904 return NT_STATUS_OK;
3907 /****************************************************************************
3909 Open a named pipe to an SMB server and bind using schannel (bind type 68)
3910 using session_key. sign and seal.
3911 ****************************************************************************/
/*
 * Open "interface" and bind it with schannel using an already-obtained
 * session key (pdc->sess_key).  The caller supplies the dcinfo from a
 * previously credentialed netlogon pipe; a flat copy of it is attached
 * to the new pipe so later netlogon calls can continue the credential
 * chain.
 */
3913 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3914 const struct ndr_syntax_id *interface,
3915 enum pipe_auth_level auth_level,
3917 const struct dcinfo *pdc,
3918 struct rpc_pipe_client **presult)
3920 struct rpc_pipe_client *result;
3921 struct cli_pipe_auth_data *auth;
3924 status = cli_rpc_pipe_open(cli, interface, &result);
3925 if (!NT_STATUS_IS_OK(status)) {
3929 status = rpccli_schannel_bind_data(result, domain, auth_level,
3930 pdc->sess_key, &auth);
3931 if (!NT_STATUS_IS_OK(status)) {
3932 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3933 nt_errstr(status)));
3934 TALLOC_FREE(result);
3938 status = rpc_pipe_bind(result, auth);
3939 if (!NT_STATUS_IS_OK(status)) {
3940 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3941 "cli_rpc_pipe_bind failed with error %s\n",
3942 nt_errstr(status) ));
3943 TALLOC_FREE(result);
3948 * The credentials on a new netlogon pipe are the ones we are passed
3949 * in - copy them over.
/* Flat (memberwise) copy of *pdc onto the result's talloc context —
 * dcinfo is assumed to contain no pointers needing a deep copy. */
3951 result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3952 if (result->dc == NULL) {
3953 DEBUG(0, ("talloc failed\n"));
3954 TALLOC_FREE(result);
3955 return NT_STATUS_NO_MEMORY;
3958 DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3960 "and bound using schannel.\n",
3961 result->trans.np.pipe_name, cli->desthost, domain ));
3964 return NT_STATUS_OK;
3967 /****************************************************************************
3968 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3969 Fetch the session key ourselves using a temporary netlogon pipe. This
3970 version uses an ntlmssp auth bound netlogon pipe to get the key.
3971 ****************************************************************************/
/*
 * Like get_schannel_session_key(), but the temporary netlogon pipe is
 * bound with SPNEGO-NTLMSSP at privacy (seal) level instead of
 * anonymously.  On success the caller owns the returned netlogon pipe.
 */
3973 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3975 const char *username,
3976 const char *password,
3978 struct rpc_pipe_client **presult)
3980 struct rpc_pipe_client *netlogon_pipe = NULL;
3983 status = cli_rpc_pipe_open_spnego_ntlmssp(
3984 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3985 domain, username, password, &netlogon_pipe);
3986 if (!NT_STATUS_IS_OK(status)) {
3990 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3992 if (!NT_STATUS_IS_OK(status)) {
3993 TALLOC_FREE(netlogon_pipe);
/* Success: transfer ownership of the credentialed netlogon pipe. */
3997 *presult = netlogon_pipe;
3998 return NT_STATUS_OK;
4001 /****************************************************************************
4002 Open a named pipe to an SMB server and bind using schannel (bind type 68).
4003 Fetch the session key ourselves using a temporary netlogon pipe. This version
4004 uses an ntlmssp bind to get the session key.
4005 ****************************************************************************/
/*
 * Full schannel open where the session key is fetched over an
 * NTLMSSP-authenticated temporary netlogon pipe: get the key, bind the
 * requested interface with it, then discard the temporary pipe.
 */
4007 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
4008 const struct ndr_syntax_id *interface,
4009 enum pipe_auth_level auth_level,
4011 const char *username,
4012 const char *password,
4013 struct rpc_pipe_client **presult)
4015 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4016 struct rpc_pipe_client *netlogon_pipe = NULL;
4017 struct rpc_pipe_client *result = NULL;
4020 status = get_schannel_session_key_auth_ntlmssp(
4021 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4022 if (!NT_STATUS_IS_OK(status)) {
4023 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4024 "key from server %s for domain %s.\n",
4025 cli->desthost, domain ));
/* Bind the requested interface with the freshly obtained key. */
4029 status = cli_rpc_pipe_open_schannel_with_key(
4030 cli, interface, auth_level, domain, netlogon_pipe->dc,
4033 /* Now we've bound using the session key we can close the netlog pipe. */
4034 TALLOC_FREE(netlogon_pipe);
4036 if (NT_STATUS_IS_OK(status)) {
4042 /****************************************************************************
4043 Open a named pipe to an SMB server and bind using schannel (bind type 68).
4044 Fetch the session key ourselves using a temporary netlogon pipe.
4045 ****************************************************************************/
/*
 * Full schannel open using an anonymously-bound temporary netlogon
 * pipe to fetch the session key (contrast with
 * cli_rpc_pipe_open_ntlmssp_auth_schannel() above, which fetches it
 * over an NTLMSSP-bound pipe).
 */
4047 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4048 const struct ndr_syntax_id *interface,
4049 enum pipe_auth_level auth_level,
4051 struct rpc_pipe_client **presult)
4053 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4054 struct rpc_pipe_client *netlogon_pipe = NULL;
4055 struct rpc_pipe_client *result = NULL;
4058 status = get_schannel_session_key(cli, domain, &neg_flags,
4060 if (!NT_STATUS_IS_OK(status)) {
4061 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4062 "key from server %s for domain %s.\n",
4063 cli->desthost, domain ))
4067 status = cli_rpc_pipe_open_schannel_with_key(
4068 cli, interface, auth_level, domain, netlogon_pipe->dc,
4071 /* Now we've bound using the session key we can close the netlog pipe. */
4072 TALLOC_FREE(netlogon_pipe);
4074 if (NT_STATUS_IS_OK(status)) {
/*
 * NOTE(review): this returns NT_STATUS_OK even when
 * cli_rpc_pipe_open_schannel_with_key() above failed, so a caller can
 * receive success with *presult never set.  The sibling
 * cli_rpc_pipe_open_ntlmssp_auth_schannel() propagates the real
 * status; this should almost certainly be "return status;" — confirm
 * against callers and fix.
 */
4078 return NT_STATUS_OK;
4081 /****************************************************************************
4082 Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4083 The idea is this can be called with service_princ, username and password all
4084 NULL so long as the caller has a TGT.
4085 ****************************************************************************/
/*
 * Kerberos bind: open the pipe, build krb5 bind data for the given
 * service principal (or rely on the caller's credential cache when the
 * principal/username/password are all NULL), then bind.
 */
4087 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4088 const struct ndr_syntax_id *interface,
4089 enum pipe_auth_level auth_level,
4090 const char *service_princ,
4091 const char *username,
4092 const char *password,
4093 struct rpc_pipe_client **presult)
4096 struct rpc_pipe_client *result;
4097 struct cli_pipe_auth_data *auth;
4100 status = cli_rpc_pipe_open(cli, interface, &result);
4101 if (!NT_STATUS_IS_OK(status)) {
4105 status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4106 username, password, &auth);
4107 if (!NT_STATUS_IS_OK(status)) {
4108 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4109 nt_errstr(status)));
4110 TALLOC_FREE(result);
4114 status = rpc_pipe_bind(result, auth);
4115 if (!NT_STATUS_IS_OK(status)) {
4116 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
4117 "with error %s\n", nt_errstr(status)));
4118 TALLOC_FREE(result);
4123 return NT_STATUS_OK;
/* Fallback branch when built without Kerberos support — presumably the
 * #else arm of a HAVE_KRB5 conditional (guard elided in this view;
 * confirm). Parameters are unused in this build. */
4125 DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4126 return NT_STATUS_NOT_IMPLEMENTED;
/*
 * Extract the session key from a bound RPC pipe's auth state into a
 * caller-owned DATA_BLOB (allocated on mem_ctx).  Which key is
 * returned depends on how the pipe was authenticated; unauthenticated
 * pipes fall back to the user session key inherited from the SMB
 * connection.  Returns NT_STATUS_NO_USER_SESSION_KEY for auth types
 * with no extractable key.
 */
4130 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4131 struct rpc_pipe_client *cli,
4132 DATA_BLOB *session_key)
4134 if (!session_key || !cli) {
4135 return NT_STATUS_INVALID_PARAMETER;
/* Second validity check — presumably guards cli->auth being NULL
 * (condition elided in this view; confirm). */
4139 return NT_STATUS_INVALID_PARAMETER;
4142 switch (cli->auth->auth_type) {
4143 case PIPE_AUTH_TYPE_SCHANNEL:
/* schannel sess_key is a fixed 16-byte field, hence the literal. */
4144 *session_key = data_blob_talloc(mem_ctx,
4145 cli->auth->a_u.schannel_auth->sess_key, 16);
4147 case PIPE_AUTH_TYPE_NTLMSSP:
4148 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4149 *session_key = data_blob_talloc(mem_ctx,
4150 cli->auth->a_u.ntlmssp_state->session_key.data,
4151 cli->auth->a_u.ntlmssp_state->session_key.length);
4153 case PIPE_AUTH_TYPE_KRB5:
4154 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4155 *session_key = data_blob_talloc(mem_ctx,
4156 cli->auth->a_u.kerberos_auth->session_key.data,
4157 cli->auth->a_u.kerberos_auth->session_key.length);
4159 case PIPE_AUTH_TYPE_NONE:
/* Anonymous bind: fall back to the SMB-level user session key that
 * cli_rpc_pipe_open_noauth() copied into the auth data. */
4160 *session_key = data_blob_talloc(mem_ctx,
4161 cli->auth->user_session_key.data,
4162 cli->auth->user_session_key.length);
/* Any other auth type has no key we can hand out. */
4165 return NT_STATUS_NO_USER_SESSION_KEY;