2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
26 #include "../libcli/auth/schannel.h"
27 #include "../libcli/auth/spnego.h"
28 #include "../libcli/auth/ntlmssp.h"
31 #define DBGC_CLASS DBGC_RPC_SRV
33 /****************************************************************************
34 Initialise an outgoing packet.
35 ****************************************************************************/
/*
 * Reset the outgoing-data state of pipe 'p' so a fresh reply can be
 * marshalled: zero the sent-byte counters and release any partially
 * sent fragment and any pending return data.
 * NOTE(review): the tail of this function (its bool return) is elided
 * in this extract — presumably it returns success/failure of the reset.
 */
37 static bool pipe_init_outgoing_data(pipes_struct *p)
39 output_data *o_data = &p->out_data;
41 /* Reset the offset counters. */
42 o_data->data_sent_length = 0;
43 o_data->current_pdu_sent = 0;
/* Drop any partially-sent outgoing fragment. */
45 data_blob_free(&o_data->frag);
47 /* Free any memory in the current return data buffer. */
48 data_blob_free(&o_data->rdata);
53 /****************************************************************************
54 Sets the fault state on incoming packets.
55 ****************************************************************************/
/*
 * Put the pipe into fault state: discard all partially-parsed incoming
 * data and mark p->fault_state so the caller replies with a fault PDU.
 */
57 static void set_incoming_fault(pipes_struct *p)
59 prs_mem_free(&p->in_data.data);
60 p->in_data.pdu_needed_len = 0;
61 p->in_data.pdu.length = 0;
62 p->fault_state = True;
63 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
64 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
67 /****************************************************************************
68 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
69 ****************************************************************************/
/*
 * Copy up to 'data_to_copy' bytes from 'data' into p->in_data.pdu until
 * RPC_HEADER_LEN bytes of the incoming PDU header have accumulated.
 * Returns the number of bytes consumed from 'data'.
 * NOTE(review): the error return after a failed talloc is elided in
 * this extract — presumably it returns -1.
 */
71 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
73 size_t len_needed_to_complete_hdr =
74 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
76 DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
77 "len_needed_to_complete_hdr = %u, "
79 (unsigned int)data_to_copy,
80 (unsigned int)len_needed_to_complete_hdr,
81 (unsigned int)p->in_data.pdu.length ));
/* Lazily allocate the header buffer on first use. */
83 if (p->in_data.pdu.data == NULL) {
84 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
86 if (p->in_data.pdu.data == NULL) {
87 DEBUG(0, ("talloc failed\n"));
/* Append the new bytes after whatever header bytes we already hold. */
91 memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
92 data, len_needed_to_complete_hdr);
93 p->in_data.pdu.length += len_needed_to_complete_hdr;
95 return (ssize_t)len_needed_to_complete_hdr;
/*
 * Parse the fragment length out of the now-complete RPC header, sanity
 * check it, record how many more bytes the PDU needs
 * (pdu_needed_len = frag_len - RPC_HEADER_LEN) and grow the pdu buffer
 * to hold the whole fragment.  On any error the pipe is put into fault
 * state.  Returns true on success (success-return lines are elided in
 * this extract).
 */
98 static bool get_pdu_size(pipes_struct *p)
101 /* the fill_rpc_header() call insures we copy only
102 * RPC_HEADER_LEN bytes. If this doesn't match then
103 * somethign is very wrong and we can only abort */
104 if (p->in_data.pdu.length != RPC_HEADER_LEN) {
105 DEBUG(0, ("Unexpected RPC Header size! "
106 "got %d, expected %d)\n",
107 (int)p->in_data.pdu.length,
109 set_incoming_fault(p);
/* Fragment length as encoded in the DCE/RPC common header. */
113 frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
115 /* verify it is a reasonable value */
116 if ((frag_len < RPC_HEADER_LEN) ||
117 (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
118 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
120 set_incoming_fault(p);
124 p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
126 /* allocate the space needed to fill the pdu */
127 p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
129 if (p->in_data.pdu.data == NULL) {
130 DEBUG(0, ("talloc_realloc failed\n"));
131 set_incoming_fault(p);
138 /****************************************************************************
139 Call this to free any talloc'ed memory. Do this after processing
140 a complete incoming and outgoing request (multiple incoming/outgoing
142 ****************************************************************************/
/*
 * Release all per-request memory after a complete request/response
 * exchange: free the outgoing fragment and return data, the incoming
 * parse buffer, and every child of the pipe's talloc pool, then
 * re-create an empty incoming marshalling buffer.  On prs_init failure
 * the pipe is marked faulted.
 */
144 static void free_pipe_context(pipes_struct *p)
146 data_blob_free(&p->out_data.frag);
147 data_blob_free(&p->out_data.rdata);
148 prs_mem_free(&p->in_data.data);
150 DEBUG(3, ("free_pipe_context: "
151 "destroying talloc pool of size %lu\n",
152 (unsigned long)talloc_total_size(p->mem_ctx)));
153 talloc_free_children(p->mem_ctx);
155 * Re-initialize to set back to marshalling and set the
156 * offset back to the start of the buffer.
158 if(!prs_init(&p->in_data.data, 128, p->mem_ctx, MARSHALL)) {
159 DEBUG(0, ("free_pipe_context: "
160 "rps_init failed!\n"));
161 p->fault_state = True;
165 /****************************************************************************
166 Processes a request pdu. This will do auth processing if needed, and
167 appends the data into the complete stream if the LAST flag is not set.
168 ****************************************************************************/
/*
 * Verify (and for privacy level, decrypt) the auth trailer on an
 * incoming request PDU according to the auth level/type negotiated at
 * bind time, then strip verifier and padding from
 * pkt->u.request.stub_and_verifier so only the stub data remains.
 * Returns true on success; many early-return/error lines are elided in
 * this extract.
 */
170 static bool dcesrv_auth_request(pipes_struct *p, struct ncacn_packet *pkt)
173 size_t hdr_size = DCERPC_REQUEST_LENGTH;
174 struct dcerpc_auth auth;
175 uint32_t auth_length;
179 DEBUG(10, ("Checking request auth.\n"));
/* An object UUID in the header enlarges the fixed request header. */
181 if (pkt->pfc_flags & DCERPC_PFC_FLAG_OBJECT_UUID) {
/* For CONNECT/NONE levels no auth trailer is expected at all. */
185 switch (p->auth.auth_level) {
186 case DCERPC_AUTH_LEVEL_PRIVACY:
187 DEBUG(10, ("Requested Privacy.\n"));
190 case DCERPC_AUTH_LEVEL_INTEGRITY:
191 DEBUG(10, ("Requested Integrity.\n"));
194 case DCERPC_AUTH_LEVEL_CONNECT:
195 if (pkt->auth_length != 0) {
199 case DCERPC_AUTH_LEVEL_NONE:
200 if (pkt->auth_length != 0) {
/* Pull the dcerpc_auth trailer off the stub data. */
209 status = dcerpc_pull_auth_trailer(pkt, pkt,
210 &pkt->u.request.stub_and_verifier,
211 &auth, &auth_length, false);
212 if (!NT_STATUS_IS_OK(status)) {
216 pkt->u.request.stub_and_verifier.length -= auth_length;
/*
 * 'data' covers the stub payload inside the raw PDU; 'full_pkt'
 * covers the whole PDU minus the auth credentials, as required by
 * the sign/seal checks below.
 */
218 data.data = p->in_data.pdu.data + hdr_size;
219 data.length = pkt->u.request.stub_and_verifier.length;
220 full_pkt.data = p->in_data.pdu.data;
221 full_pkt.length = p->in_data.pdu.length - auth.credentials.length;
223 switch (p->auth.auth_type) {
224 case PIPE_AUTH_TYPE_NONE:
227 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
228 case PIPE_AUTH_TYPE_NTLMSSP:
230 DEBUG(10, ("NTLMSSP auth\n"));
232 if (!p->auth.a_u.auth_ntlmssp_state) {
233 DEBUG(0, ("Invalid auth level, "
234 "failed to process packet auth.\n"));
238 switch (p->auth.auth_level) {
239 case DCERPC_AUTH_LEVEL_PRIVACY:
/* Privacy: decrypt in place, then copy back into the stub blob. */
240 status = auth_ntlmssp_unseal_packet(
241 p->auth.a_u.auth_ntlmssp_state,
242 data.data, data.length,
243 full_pkt.data, full_pkt.length,
245 if (!NT_STATUS_IS_OK(status)) {
248 memcpy(pkt->u.request.stub_and_verifier.data,
249 data.data, data.length);
252 case DCERPC_AUTH_LEVEL_INTEGRITY:
/* Integrity: verify signature only, payload is plaintext. */
253 status = auth_ntlmssp_check_packet(
254 p->auth.a_u.auth_ntlmssp_state,
255 data.data, data.length,
256 full_pkt.data, full_pkt.length,
258 if (!NT_STATUS_IS_OK(status)) {
264 DEBUG(0, ("Invalid auth level, "
265 "failed to process packet auth.\n"));
270 case PIPE_AUTH_TYPE_SCHANNEL:
272 DEBUG(10, ("SCHANNEL auth\n"));
274 switch (p->auth.auth_level) {
275 case DCERPC_AUTH_LEVEL_PRIVACY:
276 status = netsec_incoming_packet(
277 p->auth.a_u.schannel_auth,
279 data.data, data.length,
281 if (!NT_STATUS_IS_OK(status)) {
284 memcpy(pkt->u.request.stub_and_verifier.data,
285 data.data, data.length);
288 case DCERPC_AUTH_LEVEL_INTEGRITY:
289 status = netsec_incoming_packet(
290 p->auth.a_u.schannel_auth,
292 data.data, data.length,
294 if (!NT_STATUS_IS_OK(status)) {
300 DEBUG(0, ("Invalid auth level, "
301 "failed to process packet auth.\n"));
/* Unknown auth type: fault the pipe. */
307 DEBUG(0, ("process_request_pdu: "
308 "unknown auth type %u set.\n",
309 (unsigned int)p->auth.auth_type));
310 set_incoming_fault(p);
314 /* remove the indicated amount of padding */
315 if (pkt->u.request.stub_and_verifier.length < auth.auth_pad_length) {
318 pkt->u.request.stub_and_verifier.length -= auth.auth_pad_length;
/*
 * Handle one DCERPC_PKT_REQUEST fragment: require a prior bind, check
 * its auth trailer, append the stub data to the accumulating input
 * stream, and — when the LAST fragment flag is set — hand the complete
 * stream to api_pipe_request().  Faults the pipe on any error.
 * Several return statements are elided in this extract.
 */
323 static bool process_request_pdu(pipes_struct *p, struct ncacn_packet *pkt)
327 if (!p->pipe_bound) {
328 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
329 set_incoming_fault(p);
333 /* Store the opnum */
334 p->opnum = pkt->u.request.opnum;
336 if (!dcesrv_auth_request(p, pkt)) {
337 DEBUG(0,("Failed to check packet auth.\n"));
338 set_incoming_fault(p);
/* Only the (auth-stripped) stub data is appended below. */
342 data = pkt->u.request.stub_and_verifier;
345 * Check the data length doesn't go over the 15Mb limit.
346 * increased after observing a bug in the Windows NT 4.0 SP6a
347 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
348 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
351 if (prs_offset(&p->in_data.data) + data.length > MAX_RPC_DATA_SIZE) {
352 DEBUG(0, ("process_request_pdu: "
353 "rpc data buffer too large (%u) + (%u)\n",
354 (unsigned int)prs_data_size(&p->in_data.data),
355 (unsigned int)data.length));
356 set_incoming_fault(p);
361 * Append the data portion into the buffer and return.
364 if (!prs_copy_data_in(&p->in_data.data,
365 (char *)data.data, data.length)) {
366 DEBUG(0, ("process_request_pdu: Unable to append data size %u "
367 "to parse buffer of size %u.\n",
368 (unsigned int)data.length,
369 (unsigned int)prs_data_size(&p->in_data.data)));
370 set_incoming_fault(p);
374 if (pkt->pfc_flags & DCERPC_PFC_FLAG_LAST) {
377 * Ok - we finally have a complete RPC stream.
378 * Call the rpc command to process it.
382 * Ensure the internal prs buffer size is *exactly* the same
383 * size as the current offset.
386 if (!prs_set_buffer_size(&p->in_data.data,
387 prs_offset(&p->in_data.data))) {
388 DEBUG(0, ("process_request_pdu: "
389 "Call to prs_set_buffer_size failed!\n"));
390 set_incoming_fault(p);
395 * Set the parse offset to the start of the data and set the
396 * prs_struct to UNMARSHALL.
399 prs_set_offset(&p->in_data.data, 0);
400 prs_switch_type(&p->in_data.data, UNMARSHALL);
403 * Process the complete data stream here.
406 if (pipe_init_outgoing_data(p)) {
407 ret = api_pipe_request(p, pkt);
416 /****************************************************************************
417 Processes a finished PDU stored in p->in_data.pdu.
418 ****************************************************************************/
/*
 * Dispatch a fully-received PDU sitting in p->in_data.pdu: unmarshal
 * the ncacn header, record call_id and wire endianness, then branch on
 * the packet type (request, bind, alter-context, auth3, cancel, ...).
 * Connectionless and server->client packet types are rejected.  On
 * failure a fault PDU is queued.  Always resets the incoming pdu
 * buffer at the end so the next PDU starts clean.
 * Break statements and some error paths are elided in this extract.
 */
420 static void process_complete_pdu(pipes_struct *p)
422 struct ncacn_packet *pkt = NULL;
/* If already faulted, ignore everything until the pipe is closed. */
427 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
428 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
432 pkt = talloc(p->mem_ctx, struct ncacn_packet);
434 DEBUG(0, ("Out of memory!\n"));
438 status = dcerpc_pull_ncacn_packet(pkt, &p->in_data.pdu, pkt);
439 if (!NT_STATUS_IS_OK(status)) {
440 DEBUG(0, ("Failed to unmarshal rpc packet: %s!\n",
445 /* Store the call_id */
446 p->call_id = pkt->call_id;
449 * Ensure we're using the corrent endianness for both the
450 * RPC header flags and the raw data we will be reading from.
452 if (pkt->drep[0] == DCERPC_DREP_LE) {
453 p->endian = RPC_LITTLE_ENDIAN;
455 p->endian = RPC_BIG_ENDIAN;
457 prs_set_endian_data(&p->in_data.data, p->endian);
459 DEBUG(10, ("Processing packet type %d\n", (int)pkt->ptype));
461 switch (pkt->ptype) {
462 case DCERPC_PKT_REQUEST:
463 reply = process_request_pdu(p, pkt);
/* Connectionless (CL) packet types are invalid on a named pipe. */
466 case DCERPC_PKT_PING: /* CL request - ignore... */
467 DEBUG(0, ("process_complete_pdu: Error. "
468 "Connectionless packet type %d received on "
469 "pipe %s.\n", (int)pkt->ptype,
470 get_pipe_name_from_syntax(talloc_tos(),
474 case DCERPC_PKT_RESPONSE: /* No responses here. */
475 DEBUG(0, ("process_complete_pdu: Error. "
476 "DCERPC_PKT_RESPONSE received from client "
478 get_pipe_name_from_syntax(talloc_tos(),
482 case DCERPC_PKT_FAULT:
483 case DCERPC_PKT_WORKING:
484 /* CL request - reply to a ping when a call in process. */
485 case DCERPC_PKT_NOCALL:
486 /* CL - server reply to a ping call. */
487 case DCERPC_PKT_REJECT:
489 case DCERPC_PKT_CL_CANCEL:
490 case DCERPC_PKT_FACK:
491 case DCERPC_PKT_CANCEL_ACK:
492 DEBUG(0, ("process_complete_pdu: Error. "
493 "Connectionless packet type %u received on "
494 "pipe %s.\n", (unsigned int)pkt->ptype,
495 get_pipe_name_from_syntax(talloc_tos(),
499 case DCERPC_PKT_BIND:
501 * We assume that a pipe bind is only in one pdu.
503 if (pipe_init_outgoing_data(p)) {
504 reply = api_pipe_bind_req(p, pkt);
508 case DCERPC_PKT_BIND_ACK:
509 case DCERPC_PKT_BIND_NAK:
510 DEBUG(0, ("process_complete_pdu: Error. "
511 "DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK "
512 "packet type %u received on pipe %s.\n",
513 (unsigned int)pkt->ptype,
514 get_pipe_name_from_syntax(talloc_tos(),
519 case DCERPC_PKT_ALTER:
521 * We assume that a pipe bind is only in one pdu.
523 if (pipe_init_outgoing_data(p)) {
524 reply = api_pipe_alter_context(p, pkt);
528 case DCERPC_PKT_ALTER_RESP:
529 DEBUG(0, ("process_complete_pdu: Error. "
530 "DCERPC_PKT_ALTER_RESP on pipe %s: "
531 "Should only be server -> client.\n",
532 get_pipe_name_from_syntax(talloc_tos(),
536 case DCERPC_PKT_AUTH3:
538 * The third packet in an NTLMSSP auth exchange.
540 if (pipe_init_outgoing_data(p)) {
541 reply = api_pipe_bind_auth3(p, pkt);
545 case DCERPC_PKT_SHUTDOWN:
546 DEBUG(0, ("process_complete_pdu: Error. "
547 "DCERPC_PKT_SHUTDOWN on pipe %s: "
548 "Should only be server -> client.\n",
549 get_pipe_name_from_syntax(talloc_tos(),
553 case DCERPC_PKT_CO_CANCEL:
554 /* For now just free all client data and continue
556 DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
557 " Abandoning rpc call.\n"));
558 /* As we never do asynchronous RPC serving, we can
559 * never cancel a call (as far as I know).
560 * If we ever did we'd have to send a cancel_ack reply.
561 * For now, just free all client data and continue
567 /* Enable this if we're doing async rpc. */
568 /* We must check the outstanding callid matches. */
569 if (pipe_init_outgoing_data(p)) {
570 /* Send a cancel_ack PDU reply. */
571 /* We should probably check the auth-verifier here. */
572 reply = setup_cancel_ack_reply(p, pkt);
577 case DCERPC_PKT_ORPHANED:
578 /* We should probably check the auth-verifier here.
579 * For now just free all client data and continue
581 DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
582 " Abandoning rpc call.\n"));
587 DEBUG(0, ("process_complete_pdu: "
588 "Unknown rpc type = %u received.\n",
589 (unsigned int)pkt->ptype));
594 /* Reset to little endian.
595 * Probably don't need this but it won't hurt. */
596 prs_set_endian_data(&p->in_data.data, RPC_LITTLE_ENDIAN);
/* On failure: fault the pipe and queue a fault PDU for the client. */
599 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
600 "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
602 set_incoming_fault(p);
603 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
607 * Reset the lengths. We're ready for a new pdu.
609 TALLOC_FREE(p->in_data.pdu.data);
610 p->in_data.pdu_needed_len = 0;
611 p->in_data.pdu.length = 0;
617 /****************************************************************************
618 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
619 ****************************************************************************/
/*
 * Consume up to 'n' bytes of incoming pipe data, accumulating them into
 * p->in_data.pdu in PDU-sized units: first the RPC header, then the
 * remainder of the fragment, dispatching via process_complete_pdu()
 * once a whole PDU is present.  Returns the number of bytes consumed
 * from 'data' in this call, or an error (error-return lines are elided
 * in this extract).
 */
621 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
623 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
624 - p->in_data.pdu.length);
626 DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
627 "pdu_needed_len = %u, incoming data = %u\n",
628 (unsigned int)p->in_data.pdu.length,
629 (unsigned int)p->in_data.pdu_needed_len,
632 if(data_to_copy == 0) {
634 * This is an error - data is being received and there is no
635 * space in the PDU. Free the received data and go into the
638 DEBUG(0, ("process_incoming_data: "
639 "No space in incoming pdu buffer. "
640 "Current size = %u incoming data size = %u\n",
641 (unsigned int)p->in_data.pdu.length,
643 set_incoming_fault(p);
648 * If we have no data already, wait until we get at least
649 * a RPC_HEADER_LEN * number of bytes before we can do anything.
652 if ((p->in_data.pdu_needed_len == 0) &&
653 (p->in_data.pdu.length < RPC_HEADER_LEN)) {
655 * Always return here. If we have more data then the RPC_HEADER
656 * will be processed the next time around the loop.
658 return fill_rpc_header(p, data, data_to_copy);
662 * At this point we know we have at least an RPC_HEADER_LEN amount of
663 * data stored in p->in_data.pdu.
667 * If pdu_needed_len is zero this is a new pdu.
668 * Check how much more data we need, then loop again.
670 if (p->in_data.pdu_needed_len == 0) {
672 bool ok = get_pdu_size(p);
676 if (p->in_data.pdu_needed_len > 0) {
680 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU
681 * that consists of an RPC_HEADER only. This is a
682 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
683 * DCERPC_PKT_ORPHANED pdu type.
684 * Deal with this in process_complete_pdu(). */
688 * Ok - at this point we have a valid RPC_HEADER.
689 * Keep reading until we have a full pdu.
692 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
695 * Copy as much of the data as we need into the p->in_data.pdu buffer.
696 * pdu_needed_len becomes zero when we have a complete pdu.
699 memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
701 p->in_data.pdu.length += data_to_copy;
702 p->in_data.pdu_needed_len -= data_to_copy;
705 * Do we have a complete PDU ?
706 * (return the number of bytes handled in the call)
709 if(p->in_data.pdu_needed_len == 0) {
710 process_complete_pdu(p);
714 DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
715 "pdu.length = %u, pdu_needed_len = %u\n",
716 (unsigned int)p->in_data.pdu.length,
717 (unsigned int)p->in_data.pdu_needed_len));
719 return (ssize_t)data_to_copy;
722 /****************************************************************************
723 Accepts incoming data on an internal rpc pipe.
724 ****************************************************************************/
/*
 * Feed 'n' bytes of client data into the internal pipe, looping over
 * process_incoming_data() until everything is consumed.  Returns the
 * number of bytes written (loop structure and final return are elided
 * in this extract).
 */
726 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
728 size_t data_left = n;
733 DEBUG(10, ("write_to_pipe: data_left = %u\n",
734 (unsigned int)data_left));
736 data_used = process_incoming_data(p, data, data_left);
738 DEBUG(10, ("write_to_pipe: data_used = %d\n",
745 data_left -= data_used;
752 /****************************************************************************
753 Replies to a request to read data from a pipe.
755 Headers are interspersed with the data at PDU intervals. By the time
756 this function is called, the start of the data could possibly have been
757 read by an SMBtrans (file_offset != 0).
759 Calling create_rpc_reply() here is a hack. The data should already
760 have been prepared into arrays of headers + data stream sections.
761 ****************************************************************************/
/*
 * Satisfy a client read from the internal pipe.  First drains any
 * remainder of the current outgoing fragment; otherwise builds the next
 * PDU from p->out_data.rdata via create_next_pdu() and returns its
 * leading bytes.  Sets *is_data_outstanding when more than 'n' bytes
 * remain.  Returns bytes copied into 'data', 0 when all data has been
 * sent, or an error (some return lines are elided in this extract).
 */
763 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
764 size_t n, bool *is_data_outstanding)
766 uint32 pdu_remaining = 0;
767 ssize_t data_returned = 0;
770 DEBUG(0,("read_from_pipe: pipe not open\n"));
774 DEBUG(6,(" name: %s len: %u\n",
775 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
779 * We cannot return more than one PDU length per
784 * This condition should result in the connection being closed.
785 * Netapp filers seem to set it to 0xffff which results in domain
786 * authentications failing. Just ignore it so things work.
789 if(n > RPC_MAX_PDU_FRAG_LEN) {
790 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
791 "pipe %s. We can only service %d sized reads.\n",
793 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
794 RPC_MAX_PDU_FRAG_LEN ));
/* Clamp oversized reads rather than failing them (see above). */
795 n = RPC_MAX_PDU_FRAG_LEN;
799 * Determine if there is still data to send in the
800 * pipe PDU buffer. Always send this first. Never
801 * send more than is left in the current PDU. The
802 * client should send a new read request for a new
806 pdu_remaining = p->out_data.frag.length
807 - p->out_data.current_pdu_sent;
809 if (pdu_remaining > 0) {
810 data_returned = (ssize_t)MIN(n, pdu_remaining);
812 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
813 "current_pdu_sent = %u returning %d bytes.\n",
814 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
815 (unsigned int)p->out_data.frag.length,
816 (unsigned int)p->out_data.current_pdu_sent,
817 (int)data_returned));
820 p->out_data.frag.data
821 + p->out_data.current_pdu_sent,
824 p->out_data.current_pdu_sent += (uint32)data_returned;
829 * At this point p->current_pdu_len == p->current_pdu_sent (which
830 * may of course be zero if this is the first return fragment.
833 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
834 "= %u, p->out_data.rdata.length = %u.\n",
835 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
837 (unsigned int)p->out_data.data_sent_length,
838 (unsigned int)p->out_data.rdata.length));
840 if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
842 * We have sent all possible data, return 0.
849 * We need to create a new PDU from the data left in p->rdata.
850 * Create the header/data/footers. This also sets up the fields
851 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
852 * and stores the outgoing PDU in p->current_pdu.
855 if(!create_next_pdu(p)) {
856 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
857 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
861 data_returned = MIN(n, p->out_data.frag.length);
863 memcpy(data, p->out_data.frag.data, (size_t)data_returned);
864 p->out_data.current_pdu_sent += (uint32)data_returned;
867 (*is_data_outstanding) = p->out_data.frag.length > n;
869 if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
870 /* We've returned everything in the out_data.frag
871 * so we're done with this pdu. Free it and reset
872 * current_pdu_sent. */
873 p->out_data.current_pdu_sent = 0;
874 data_blob_free(&p->out_data.frag);
876 if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
878 * We're completely finished with both outgoing and
879 * incoming data streams. It's safe to free all
880 * temporary data from this request.
882 free_pipe_context(p);
886 return data_returned;
/*
 * Return true if 'fsp' is a fake-file handle backing a named pipe
 * (either an internal pipe or a proxied one); false for NULL or
 * non-pipe handles.
 */
889 bool fsp_is_np(struct files_struct *fsp)
891 enum FAKE_FILE_TYPE type;
893 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
897 type = fsp->fake_file_handle->type;
899 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
900 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
/*
 * State for a named pipe proxied to an external RPC server over a
 * tstream: the connected stream plus per-direction tevent queues that
 * serialize reads and writes, and the device state / allocation size
 * reported by the remote end at connect time.
 */
903 struct np_proxy_state {
905 uint16_t device_state;
906 uint64_t allocation_size;
907 struct tstream_context *npipe;
908 struct tevent_queue *read_queue;
909 struct tevent_queue *write_queue;
/*
 * Connect to an external RPC pipe server over a unix-domain named-pipe
 * socket (under the configured "external_rpc_pipe:socket_dir"),
 * authenticating the connection with the caller's SamInfo3 derived
 * from 'server_info'.  The connect is driven synchronously via a
 * private tevent context.  Returns a talloc'ed np_proxy_state on
 * success, or NULL on failure (several goto/fail lines are elided in
 * this extract; the final TALLOC_FREE(result) is the error cleanup).
 */
912 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
913 const char *pipe_name,
914 const struct tsocket_address *local_address,
915 const struct tsocket_address *remote_address,
916 struct auth_serversupplied_info *server_info)
918 struct np_proxy_state *result;
920 const char *socket_dir;
921 struct tevent_context *ev;
922 struct tevent_req *subreq;
923 struct netr_SamInfo3 *info3;
929 result = talloc(mem_ctx, struct np_proxy_state);
930 if (result == NULL) {
931 DEBUG(0, ("talloc failed\n"));
935 result->read_queue = tevent_queue_create(result, "np_read");
936 if (result->read_queue == NULL) {
937 DEBUG(0, ("tevent_queue_create failed\n"));
941 result->write_queue = tevent_queue_create(result, "np_write");
942 if (result->write_queue == NULL) {
943 DEBUG(0, ("tevent_queue_create failed\n"));
/* Private event context used only to drive this synchronous connect. */
947 ev = s3_tevent_context_init(talloc_tos());
949 DEBUG(0, ("s3_tevent_context_init failed\n"));
953 socket_dir = lp_parm_const_string(
954 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
955 get_dyn_NCALRPCDIR());
956 if (socket_dir == NULL) {
957 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
960 socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
961 if (socket_np_dir == NULL) {
962 DEBUG(0, ("talloc_asprintf failed\n"));
/* Build the SamInfo3 identity token passed to the pipe server. */
966 info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
968 DEBUG(0, ("talloc failed\n"));
972 status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
973 if (!NT_STATUS_IS_OK(status)) {
975 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
981 subreq = tstream_npa_connect_send(talloc_tos(), ev,
984 remote_address, /* client_addr */
985 NULL, /* client_name */
986 local_address, /* server_addr */
987 NULL, /* server_name */
989 server_info->user_session_key,
990 data_blob_null /* delegated_creds */);
991 if (subreq == NULL) {
993 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
994 "user %s\\%s failed\n",
995 socket_np_dir, pipe_name, info3->base.domain.string,
996 info3->base.account_name.string));
/* Block until the async connect completes. */
999 ok = tevent_req_poll(subreq, ev);
1002 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
1003 "failed for tstream_npa_connect: %s\n",
1004 socket_np_dir, pipe_name, info3->base.domain.string,
1005 info3->base.account_name.string,
1010 ret = tstream_npa_connect_recv(subreq, &sys_errno,
1014 &result->device_state,
1015 &result->allocation_size);
1016 TALLOC_FREE(subreq);
1018 DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
1019 "user %s\\%s failed: %s\n",
1020 socket_np_dir, pipe_name, info3->base.domain.string,
1021 info3->base.account_name.string,
1022 strerror(sys_errno)));
/* Error path: release the partially-built state. */
1029 TALLOC_FREE(result);
/*
 * Open the named pipe 'name' and return a fake_file_handle for it in
 * *phandle.  Pipes listed in the "np:proxy" parameter are proxied to an
 * external RPC server (make_external_rpc_pipe_p); all others must be
 * known internal pipes and get an in-process pipes_struct
 * (make_internal_rpc_pipe_p).  Returns NT_STATUS_OK on success,
 * OBJECT_NAME_NOT_FOUND for unknown pipe names, NO_MEMORY or
 * PIPE_NOT_AVAILABLE on failure.
 */
1033 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1034 const struct tsocket_address *local_address,
1035 const struct tsocket_address *remote_address,
1036 struct auth_serversupplied_info *server_info,
1037 struct fake_file_handle **phandle)
1039 const char **proxy_list;
1040 struct fake_file_handle *handle;
1042 proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1044 handle = talloc(mem_ctx, struct fake_file_handle);
1045 if (handle == NULL) {
1046 return NT_STATUS_NO_MEMORY;
/* Case-insensitive match against the configured proxy pipe list. */
1049 if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1050 struct np_proxy_state *p;
1052 p = make_external_rpc_pipe_p(handle, name,
1057 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1058 handle->private_data = p;
1060 struct pipes_struct *p;
1061 struct ndr_syntax_id syntax;
1062 const char *client_address;
1064 if (!is_known_pipename(name, &syntax)) {
1065 TALLOC_FREE(handle);
1066 return NT_STATUS_OBJECT_NAME_NOT_FOUND;
/* Internal pipes want a printable client address ("" if not IP). */
1069 if (tsocket_address_is_inet(remote_address, "ip")) {
1070 client_address = tsocket_address_inet_addr_string(
1073 if (client_address == NULL) {
1074 TALLOC_FREE(handle);
1075 return NT_STATUS_NO_MEMORY;
1078 client_address = "";
1081 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1084 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1085 handle->private_data = p;
/* Either constructor failing leaves private_data NULL. */
1088 if (handle->private_data == NULL) {
1089 TALLOC_FREE(handle);
1090 return NT_STATUS_PIPE_NOT_AVAILABLE;
1095 return NT_STATUS_OK;
/*
 * Report whether a read is currently pending on the pipe handle.
 * Internal pipes apparently never have one (early return, body elided
 * in this extract); proxied pipes check the length of the read queue.
 */
1098 bool np_read_in_progress(struct fake_file_handle *handle)
1100 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1104 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1105 struct np_proxy_state *p = talloc_get_type_abort(
1106 handle->private_data, struct np_proxy_state);
1109 read_count = tevent_queue_length(p->read_queue);
1110 if (read_count > 0) {
/*
 * Per-request state for np_write_send/np_write_recv: the event context,
 * the proxy state being written to, and (per elided fields) the iovec
 * and resulting byte count.
 */
1120 struct np_write_state {
1121 struct event_context *ev;
1122 struct np_proxy_state *p;
1127 static void np_write_done(struct tevent_req *subreq);
/*
 * Async write of 'len' bytes to a named pipe handle.  Internal pipes
 * are written synchronously via write_to_internal_pipe() and the
 * request completes immediately; proxied pipes queue a tstream writev
 * on the proxy's write queue and complete in np_write_done().
 * Unknown handle types fail with NT_STATUS_INVALID_HANDLE.
 */
1129 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1130 struct fake_file_handle *handle,
1131 const uint8_t *data, size_t len)
1133 struct tevent_req *req;
1134 struct np_write_state *state;
1137 DEBUG(6, ("np_write_send: len: %d\n", (int)len));
1138 dump_data(50, data, len);
1140 req = tevent_req_create(mem_ctx, &state, struct np_write_state);
1146 state->nwritten = 0;
1147 status = NT_STATUS_OK;
1151 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1152 struct pipes_struct *p = talloc_get_type_abort(
1153 handle->private_data, struct pipes_struct);
/* Internal pipe: synchronous write, result known right away. */
1155 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
1157 status = (state->nwritten >= 0)
1158 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1162 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1163 struct np_proxy_state *p = talloc_get_type_abort(
1164 handle->private_data, struct np_proxy_state);
1165 struct tevent_req *subreq;
1169 state->iov.iov_base = CONST_DISCARD(void *, data);
1170 state->iov.iov_len = len;
1172 subreq = tstream_writev_queue_send(state, ev,
1176 if (subreq == NULL) {
1179 tevent_req_set_callback(subreq, np_write_done, req);
/* Fall-through for unhandled handle types. */
1183 status = NT_STATUS_INVALID_HANDLE;
/* Complete the request immediately for the synchronous paths. */
1185 if (NT_STATUS_IS_OK(status)) {
1186 tevent_req_done(req);
1188 tevent_req_nterror(req, status);
1190 return tevent_req_post(req, ev);
/*
 * Completion callback for the proxied writev: collect the byte count
 * (mapping a unix errno to NTSTATUS on failure) and finish the
 * np_write request.
 */
1196 static void np_write_done(struct tevent_req *subreq)
1198 struct tevent_req *req = tevent_req_callback_data(
1199 subreq, struct tevent_req);
1200 struct np_write_state *state = tevent_req_data(
1201 req, struct np_write_state);
1205 received = tstream_writev_queue_recv(subreq, &err);
1207 tevent_req_nterror(req, map_nt_error_from_unix(err));
1210 state->nwritten = received;
1211 tevent_req_done(req);
/*
 * Receive side of np_write_send: return the request's NTSTATUS and, on
 * success, the number of bytes written in *pnwritten.
 */
1214 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1216 struct np_write_state *state = tevent_req_data(
1217 req, struct np_write_state);
1220 if (tevent_req_is_nterror(req, &status)) {
1223 *pnwritten = state->nwritten;
1224 return NT_STATUS_OK;
/*
 * Cursor state for the chunked readv below: destination buffer, total
 * length (capped at UINT16_MAX), current offset, and remaining pending
 * bytes (fields partly elided in this extract).
 */
1227 struct np_ipc_readv_next_vector_state {
/* Initialise the cursor over 'buf'/'len' for a fresh read. */
1234 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
1235 uint8_t *buf, size_t len)
1240 s->len = MIN(len, UINT16_MAX);
/*
 * tstream_readv_pdu "next vector" callback: hand back one iovec for the
 * next chunk to read into state->buf.  Returns a short read as soon as
 * some data is present and nothing more is pending, records surplus
 * pending bytes in state->remaining, and signals completion by
 * producing no vector once state->len is reached (several return lines
 * are elided in this extract).
 */
1243 static int np_ipc_readv_next_vector(struct tstream_context *stream,
1245 TALLOC_CTX *mem_ctx,
1246 struct iovec **_vector,
1249 struct np_ipc_readv_next_vector_state *state =
1250 (struct np_ipc_readv_next_vector_state *)private_data;
1251 struct iovec *vector;
/* All requested bytes read: no more vectors. */
1255 if (state->ofs == state->len) {
1261 pending = tstream_pending_bytes(stream);
1262 if (pending == -1) {
1266 if (pending == 0 && state->ofs != 0) {
1267 /* return a short read */
1274 /* we want at least one byte and recheck again */
1277 size_t missing = state->len - state->ofs;
1278 if (pending > missing) {
1279 /* there's more available */
1280 state->remaining = pending - missing;
1283 /* read what we can get and recheck in the next cycle */
1288 vector = talloc_array(mem_ctx, struct iovec, 1);
1293 vector[0].iov_base = state->buf + state->ofs;
1294 vector[0].iov_len = wanted;
1296 state->ofs += wanted;
/*
 * Per-request state for np_read_send/np_read_recv: proxy state, the
 * readv cursor, the byte count read, and whether more data is pending.
 */
1303 struct np_read_state {
1304 struct np_proxy_state *p;
1305 struct np_ipc_readv_next_vector_state next_vector;
1308 bool is_data_outstanding;
1311 static void np_read_done(struct tevent_req *subreq);
/*
 * Async read of up to 'len' bytes from a named pipe handle.  Internal
 * pipes are serviced synchronously via read_from_internal_pipe() and
 * complete immediately; proxied pipes queue a tstream readv-pdu request
 * that completes in np_read_done().  Unknown handle types fail with
 * NT_STATUS_INVALID_HANDLE.
 */
1313 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1314 struct fake_file_handle *handle,
1315 uint8_t *data, size_t len)
1317 struct tevent_req *req;
1318 struct np_read_state *state;
1321 req = tevent_req_create(mem_ctx, &state, struct np_read_state);
1326 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1327 struct pipes_struct *p = talloc_get_type_abort(
1328 handle->private_data, struct pipes_struct);
/* Internal pipe: synchronous read, result known right away. */
1330 state->nread = read_from_internal_pipe(
1331 p, (char *)data, len, &state->is_data_outstanding);
1333 status = (state->nread >= 0)
1334 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1338 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1339 struct np_proxy_state *p = talloc_get_type_abort(
1340 handle->private_data, struct np_proxy_state);
1341 struct tevent_req *subreq;
1343 np_ipc_readv_next_vector_init(&state->next_vector,
1346 subreq = tstream_readv_pdu_queue_send(state,
1350 np_ipc_readv_next_vector,
1351 &state->next_vector);
1352 if (subreq == NULL) {
1355 tevent_req_set_callback(subreq, np_read_done, req);
/* Fall-through for unhandled handle types. */
1359 status = NT_STATUS_INVALID_HANDLE;
1361 if (NT_STATUS_IS_OK(status)) {
1362 tevent_req_done(req);
1364 tevent_req_nterror(req, status);
1366 return tevent_req_post(req, ev);
/*
 * Completion callback for the proxied readv-pdu: collect the byte
 * count (mapping errno to NTSTATUS on failure), note whether the
 * stream still has pending bytes, and finish the np_read request.
 */
1369 static void np_read_done(struct tevent_req *subreq)
1371 struct tevent_req *req = tevent_req_callback_data(
1372 subreq, struct tevent_req);
1373 struct np_read_state *state = tevent_req_data(
1374 req, struct np_read_state);
1378 ret = tstream_readv_pdu_queue_recv(subreq, &err);
1379 TALLOC_FREE(subreq);
1381 tevent_req_nterror(req, map_nt_error_from_unix(err));
1386 state->is_data_outstanding = (state->next_vector.remaining > 0);
1388 tevent_req_done(req);
/*
 * Receive side of np_read_send: return the request's NTSTATUS and, on
 * success, the bytes read and whether more data is outstanding.
 */
1392 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1393 bool *is_data_outstanding)
1395 struct np_read_state *state = tevent_req_data(
1396 req, struct np_read_state);
1399 if (tevent_req_is_nterror(req, &status)) {
1402 *nread = state->nread;
1403 *is_data_outstanding = state->is_data_outstanding;
1404 return NT_STATUS_OK;
1408 * @brief Create a new RPC client context which uses a local dispatch function.
1410 * @param[in] conn The connection struct that will hold the pipe
1412 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
1414 * @return NT_STATUS_OK on success, a corresponding NT status if an
/*
 * Return (lazily creating and caching on 'conn') an internal RPC
 * client pipe to the spoolss service, dispatched locally rather than
 * over the network.  The cached pipe is reused across calls;
 * disconnection handling is still a TODO per the comment below.
 */
1417 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
1418 struct rpc_pipe_client **spoolss_pipe)
1422 /* TODO: check and handle disconnections */
1424 if (!conn->spoolss_pipe) {
1425 status = rpc_pipe_open_internal(conn,
1426 &ndr_table_spoolss.syntax_id,
1428 &conn->spoolss_pipe);
1429 if (!NT_STATUS_IS_OK(status)) {
1434 *spoolss_pipe = conn->spoolss_pipe;
1435 return NT_STATUS_OK;