2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
28 #define DBGC_CLASS DBGC_RPC_SRV
30 /****************************************************************************
31 Initialise an outgoing packet.
32 ****************************************************************************/
/* Reset p->out_data so a fresh reply can be marshalled: zero the send-progress
 * counters, free the previous fragment and reply buffers, and re-init the
 * reply prs_struct for MARSHALL. Returns false on allocation failure.
 * NOTE(review): this extract has dropped lines (opening brace, return paths)
 * — confirm against the pristine file before editing. */
34 static bool pipe_init_outgoing_data(pipes_struct *p)
36 output_data *o_data = &p->out_data;
38 /* Reset the offset counters. */
39 o_data->data_sent_length = 0;
40 o_data->current_pdu_sent = 0;
/* Drop any partially-sent outgoing fragment from the previous request. */
42 prs_mem_free(&o_data->frag);
44 /* Free any memory in the current return data buffer. */
45 prs_mem_free(&o_data->rdata);
48 * Initialize the outgoing RPC data buffer.
49 * we will use this as the raw data area for replying to rpc requests.
/* 128 bytes is just the initial size; prs grows on demand. */
51 if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
52 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
59 /****************************************************************************
60 Sets the fault state on incoming packets.
61 ****************************************************************************/
/* Put the pipe into fault state: discard all accumulated incoming data and
 * reset the PDU byte counters so subsequent input is ignored until the
 * fault is answered. Callers follow this with a fault PDU where needed. */
63 static void set_incoming_fault(pipes_struct *p)
65 prs_mem_free(&p->in_data.data);
66 p->in_data.pdu_needed_len = 0;
67 p->in_data.pdu_received_len = 0;
68 p->fault_state = True;
69 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
70 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
73 /****************************************************************************
74 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
75 ****************************************************************************/
/* Copy up to data_to_copy bytes from 'data' into current_in_pdu until a full
 * RPC header (RPC_HEADER_LEN bytes) has been accumulated. Allocates the PDU
 * buffer lazily on first use. Returns the number of bytes consumed.
 * NOTE(review): the talloc_array size argument and the error-return lines are
 * elided in this extract — verify against upstream. */
77 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
79 size_t len_needed_to_complete_hdr =
80 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
82 DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
83 "len_needed_to_complete_hdr = %u, "
85 (unsigned int)data_to_copy,
86 (unsigned int)len_needed_to_complete_hdr,
87 (unsigned int)p->in_data.pdu_received_len ));
/* Lazily allocate the incoming-PDU staging buffer. */
89 if (p->in_data.current_in_pdu == NULL) {
90 p->in_data.current_in_pdu = talloc_array(p, uint8_t,
93 if (p->in_data.current_in_pdu == NULL) {
94 DEBUG(0, ("talloc failed\n"));
/* Append at the current fill point and advance the received counter. */
98 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
99 data, len_needed_to_complete_hdr);
100 p->in_data.pdu_received_len += len_needed_to_complete_hdr;
102 return (ssize_t)len_needed_to_complete_hdr;
105 /****************************************************************************
106 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
107 ****************************************************************************/
/* Parse the RPC_HEADER now sitting in current_in_pdu: validate version 5.0,
 * check FIRST-flag / endianness consistency across fragments, sanity-check
 * frag_len, then grow current_in_pdu to frag_len and set pdu_needed_len to
 * the remaining body bytes. Returns 0 on success; faults the pipe and
 * (per the elided lines, presumably) returns -1 on any validation failure.
 * NOTE(review): several return statements and the rpc_in declaration are
 * missing from this extract — confirm against the pristine file. */
109 static ssize_t unmarshall_rpc_header(pipes_struct *p)
112 * Unmarshall the header to determine the needed length.
/* Assert we were handed exactly one header's worth of bytes. */
117 if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
118 DEBUG(0, ("unmarshall_rpc_header: "
119 "assert on rpc header length failed.\n"));
120 set_incoming_fault(p);
/* Wrap the raw header bytes in a non-owning UNMARSHALL prs_struct. */
124 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
125 prs_set_endian_data( &rpc_in, p->endian);
127 prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
128 p->in_data.pdu_received_len, False);
131 * Unmarshall the header as this will tell us how much
132 * data we need to read to get the complete pdu.
133 * This also sets the endian flag in rpc_in.
136 if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
137 DEBUG(0, ("unmarshall_rpc_header: "
138 "failed to unmarshall RPC_HDR.\n"));
139 set_incoming_fault(p);
140 prs_mem_free(&rpc_in);
145 * Validate the RPC header.
/* NOTE(review): '&&' here means a 5.x/x.0 header slips through; upstream
 * Samba later changed this check — verify intent before touching it. */
148 if(p->hdr.major != 5 && p->hdr.minor != 0) {
149 DEBUG(0, ("unmarshall_rpc_header: "
150 "invalid major/minor numbers in RPC_HDR.\n"));
151 set_incoming_fault(p);
152 prs_mem_free(&rpc_in);
157 * If there's not data in the incoming buffer this should be the
158 * start of a new RPC.
161 if(prs_offset(&p->in_data.data) == 0) {
164 * AS/U doesn't set FIRST flag in a BIND packet it seems.
167 if ((p->hdr.pkt_type == DCERPC_PKT_REQUEST) &&
168 !(p->hdr.flags & DCERPC_PFC_FLAG_FIRST)) {
170 * Ensure that the FIRST flag is set.
171 * If not then we have a stream missmatch.
174 DEBUG(0, ("unmarshall_rpc_header: "
175 "FIRST flag not set in first PDU !\n"));
176 set_incoming_fault(p);
177 prs_mem_free(&rpc_in);
182 * If this is the first PDU then set the endianness
183 * flag in the pipe. We will need this when parsing all
/* Latch the stream endianness discovered by smb_io_rpc_hdr(). */
187 p->endian = rpc_in.bigendian_data;
189 DEBUG(5, ("unmarshall_rpc_header: using %sendian RPC\n",
190 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
195 * If this is *NOT* the first PDU then check the endianness
196 * flag in the pipe is the same as that in the PDU.
199 if (p->endian != rpc_in.bigendian_data) {
200 DEBUG(0, ("unmarshall_rpc_header: FIRST endianness "
201 "flag (%d) different in next PDU !\n",
203 set_incoming_fault(p);
204 prs_mem_free(&rpc_in);
210 * Ensure that the pdu length is sane.
/* frag_len must at least cover the header and never exceed the max. */
213 if ((p->hdr.frag_len < RPC_HEADER_LEN) ||
214 (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
215 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
216 set_incoming_fault(p);
217 prs_mem_free(&rpc_in);
221 DEBUG(10, ("unmarshall_rpc_header: type = %u, flags = %u\n",
222 (unsigned int)p->hdr.pkt_type, (unsigned int)p->hdr.flags));
/* Body bytes still outstanding for this fragment. */
224 p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
226 prs_mem_free(&rpc_in);
/* Grow the staging buffer to hold the whole fragment. */
228 p->in_data.current_in_pdu = TALLOC_REALLOC_ARRAY(
229 p, p->in_data.current_in_pdu, uint8_t, p->hdr.frag_len);
230 if (p->in_data.current_in_pdu == NULL) {
231 DEBUG(0, ("talloc failed\n"));
232 set_incoming_fault(p);
236 return 0; /* No extra data processed. */
239 /****************************************************************************
240 Call this to free any talloc'ed memory. Do this after processing
241 a complete incoming and outgoing request (multiple incoming/outgoing
243 ****************************************************************************/
/* Release all per-request buffers (outgoing fragment, reply data, incoming
 * stream), free all children of the pipe's talloc pool, and re-create the
 * incoming data prs_struct ready for the next request. Sets fault_state if
 * re-initialisation fails. */
245 static void free_pipe_context(pipes_struct *p)
247 prs_mem_free(&p->out_data.frag);
248 prs_mem_free(&p->out_data.rdata);
249 prs_mem_free(&p->in_data.data);
251 DEBUG(3, ("free_pipe_context: "
252 "destroying talloc pool of size %lu\n",
253 (unsigned long)talloc_total_size(p->mem_ctx)));
254 talloc_free_children(p->mem_ctx);
256 * Re-initialize to set back to marshalling and set the
257 * offset back to the start of the buffer.
259 if(!prs_init(&p->in_data.data, 128, p->mem_ctx, MARSHALL)) {
260 DEBUG(0, ("free_pipe_context: "
/* Typo in the original message ("rps_init") — kept verbatim here. */
261 "rps_init failed!\n"));
262 p->fault_state = True;
266 /****************************************************************************
267 Processes a request pdu. This will do auth processing if needed, and
268 appends the data into the complete stream if the LAST flag is not set.
269 ****************************************************************************/
/* Handle one DCERPC_PKT_REQUEST fragment: verify a bind exists, parse the
 * request header, run the configured auth (NTLMSSP / SPNEGO / schannel)
 * check over the payload, strip sign/seal padding, append the payload to
 * in_data.data, and — when the LAST flag is set — dispatch the assembled
 * stream via api_pipe_request(). Returns the reply status (bool).
 * NOTE(review): the bind check conditional, several returns and closing
 * braces are elided in this extract — verify against upstream. */
271 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
273 uint32 ss_padding_len = 0;
/* Payload size = frag_len minus header(s) and any auth trailer. */
274 size_t data_len = p->hdr.frag_len
277 - (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0)
281 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
282 set_incoming_fault(p);
287 * Check if we need to do authentication processing.
288 * This is only done on requests, not binds.
292 * Read the RPC request header.
295 if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
296 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
297 set_incoming_fault(p);
/* Per-auth-type integrity/privacy processing of this fragment. */
301 switch(p->auth.auth_type) {
302 case PIPE_AUTH_TYPE_NONE:
305 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
306 case PIPE_AUTH_TYPE_NTLMSSP:
309 if (!api_pipe_ntlmssp_auth_process(p, rpc_in_p,
312 DEBUG(0, ("process_request_pdu: "
313 "failed to do auth processing.\n"));
314 DEBUG(0, ("process_request_pdu: error is %s\n",
316 set_incoming_fault(p);
322 case PIPE_AUTH_TYPE_SCHANNEL:
323 if (!api_pipe_schannel_process(p, rpc_in_p,
325 DEBUG(3, ("process_request_pdu: "
326 "failed to do schannel processing.\n"));
327 set_incoming_fault(p);
/* default: unknown auth type is a hard fault. */
333 DEBUG(0, ("process_request_pdu: "
334 "unknown auth type %u set.\n",
335 (unsigned int)p->auth.auth_type));
336 set_incoming_fault(p);
340 /* Now we've done the sign/seal we can remove any padding data. */
341 if (data_len > ss_padding_len) {
342 data_len -= ss_padding_len;
346 * Check the data length doesn't go over the 15Mb limit.
347 * increased after observing a bug in the Windows NT 4.0 SP6a
348 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
349 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
352 if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
353 DEBUG(0, ("process_request_pdu: "
354 "rpc data buffer too large (%u) + (%u)\n",
355 (unsigned int)prs_data_size(&p->in_data.data),
356 (unsigned int)data_len ));
357 set_incoming_fault(p);
362 * Append the data portion into the buffer and return.
365 if (!prs_append_some_prs_data(&p->in_data.data, rpc_in_p,
366 prs_offset(rpc_in_p), data_len)) {
367 DEBUG(0, ("process_request_pdu: Unable to append data size %u "
368 "to parse buffer of size %u.\n",
369 (unsigned int)data_len,
370 (unsigned int)prs_data_size(&p->in_data.data)));
371 set_incoming_fault(p);
/* LAST fragment: the full request stream is assembled — dispatch it. */
375 if(p->hdr.flags & DCERPC_PFC_FLAG_LAST) {
378 * Ok - we finally have a complete RPC stream.
379 * Call the rpc command to process it.
383 * Ensure the internal prs buffer size is *exactly* the same
384 * size as the current offset.
387 if (!prs_set_buffer_size(&p->in_data.data,
388 prs_offset(&p->in_data.data))) {
389 DEBUG(0, ("process_request_pdu: "
390 "Call to prs_set_buffer_size failed!\n"));
391 set_incoming_fault(p);
396 * Set the parse offset to the start of the data and set the
397 * prs_struct to UNMARSHALL.
400 prs_set_offset(&p->in_data.data, 0);
401 prs_switch_type(&p->in_data.data, UNMARSHALL);
404 * Process the complete data stream here.
407 if(pipe_init_outgoing_data(p)) {
408 ret = api_pipe_request(p);
417 /****************************************************************************
418 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
419 already been parsed and stored in p->hdr.
420 ****************************************************************************/
/* Dispatch a fully-received PDU by p->hdr.pkt_type: REQUEST, BIND, ALTER,
 * AUTH3 and CO_CANCEL/ORPHANED are handled; connectionless and
 * server-to-client types are rejected. On a failed reply a fault PDU is
 * queued. Finally the staging buffer and byte counters are reset for the
 * next PDU.
 * NOTE(review): the fault-state guard at the top, the rpc_in/reply
 * declarations and several break statements are elided in this extract. */
422 static void process_complete_pdu(pipes_struct *p)
425 size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
/* Body starts right after the already-parsed RPC header. */
426 char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
/* Pipe already faulted: reject this PDU with a range-error fault. */
430 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
431 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
432 set_incoming_fault(p);
433 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
437 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
440 * Ensure we're using the corrent endianness for both the
441 * RPC header flags and the raw data we will be reading from.
444 prs_set_endian_data( &rpc_in, p->endian);
445 prs_set_endian_data( &p->in_data.data, p->endian);
/* Non-owning view over the PDU body. */
447 prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
449 DEBUG(10,("process_complete_pdu: processing packet type %u\n",
450 (unsigned int)p->hdr.pkt_type ));
452 switch (p->hdr.pkt_type) {
453 case DCERPC_PKT_REQUEST:
454 reply = process_request_pdu(p, &rpc_in);
457 case DCERPC_PKT_PING: /* CL request - ignore... */
458 DEBUG(0, ("process_complete_pdu: Error. "
459 "Connectionless packet type %u received on "
460 "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
461 get_pipe_name_from_syntax(talloc_tos(),
465 case DCERPC_PKT_RESPONSE: /* No responses here. */
466 DEBUG(0, ("process_complete_pdu: Error. "
467 "DCERPC_PKT_RESPONSE received from client "
469 get_pipe_name_from_syntax(talloc_tos(),
/* All remaining connectionless types are equally invalid on a pipe. */
473 case DCERPC_PKT_FAULT:
474 case DCERPC_PKT_WORKING:
475 /* CL request - reply to a ping when a call in process. */
476 case DCERPC_PKT_NOCALL:
477 /* CL - server reply to a ping call. */
478 case DCERPC_PKT_REJECT:
480 case DCERPC_PKT_CL_CANCEL:
481 case DCERPC_PKT_FACK:
482 case DCERPC_PKT_CANCEL_ACK:
483 DEBUG(0, ("process_complete_pdu: Error. "
484 "Connectionless packet type %u received on "
485 "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
486 get_pipe_name_from_syntax(talloc_tos(),
490 case DCERPC_PKT_BIND:
492 * We assume that a pipe bind is only in one pdu.
494 if(pipe_init_outgoing_data(p)) {
495 reply = api_pipe_bind_req(p, &rpc_in);
499 case DCERPC_PKT_BIND_ACK:
500 case DCERPC_PKT_BIND_NAK:
501 DEBUG(0, ("process_complete_pdu: Error. "
502 "DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK "
503 "packet type %u received on pipe %s.\n",
504 (unsigned int)p->hdr.pkt_type,
505 get_pipe_name_from_syntax(talloc_tos(),
510 case DCERPC_PKT_ALTER:
512 * We assume that a pipe bind is only in one pdu.
514 if(pipe_init_outgoing_data(p)) {
515 reply = api_pipe_alter_context(p, &rpc_in);
519 case DCERPC_PKT_ALTER_RESP:
520 DEBUG(0, ("process_complete_pdu: Error. "
521 "DCERPC_PKT_ALTER_RESP on pipe %s: "
522 "Should only be server -> client.\n",
523 get_pipe_name_from_syntax(talloc_tos(),
527 case DCERPC_PKT_AUTH3:
529 * The third packet in an NTLMSSP auth exchange.
531 if(pipe_init_outgoing_data(p)) {
532 reply = api_pipe_bind_auth3(p, &rpc_in);
536 case DCERPC_PKT_SHUTDOWN:
537 DEBUG(0, ("process_complete_pdu: Error. "
538 "DCERPC_PKT_SHUTDOWN on pipe %s: "
539 "Should only be server -> client.\n",
540 get_pipe_name_from_syntax(talloc_tos(),
544 case DCERPC_PKT_CO_CANCEL:
545 /* For now just free all client data and continue
547 DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
548 " Abandoning rpc call.\n"));
549 /* As we never do asynchronous RPC serving, we can
550 * never cancel a call (as far as I know).
551 * If we ever did we'd have to send a cancel_ack reply.
552 * For now, just free all client data and continue
/* The following cancel_ack path is dormant (async RPC not done). */
557 /* Enable this if we're doing async rpc. */
558 /* We must check the outstanding callid matches. */
559 if(pipe_init_outgoing_data(p)) {
560 /* Send a cancel_ack PDU reply. */
561 /* We should probably check the auth-verifier here. */
562 reply = setup_cancel_ack_reply(p, &rpc_in);
567 case DCERPC_PKT_ORPHANED:
568 /* We should probably check the auth-verifier here.
569 * For now just free all client data and continue
571 DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
572 " Abandoning rpc call.\n"));
577 DEBUG(0, ("process_complete_pdu: "
578 "Unknown rpc type = %u received.\n",
579 (unsigned int)p->hdr.pkt_type));
583 /* Reset to little endian.
584 * Probably don't need this but it won't hurt. */
585 prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
/* reply == false: fault the pipe and queue a fault PDU to the client. */
588 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
589 "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
591 set_incoming_fault(p);
592 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
593 prs_mem_free(&rpc_in);
596 * Reset the lengths. We're ready for a new pdu.
598 TALLOC_FREE(p->in_data.current_in_pdu);
599 p->in_data.pdu_needed_len = 0;
600 p->in_data.pdu_received_len = 0;
603 prs_mem_free(&rpc_in);
606 /****************************************************************************
607 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
608 ****************************************************************************/
/* Consume up to one PDU's worth of bytes from 'data': first accumulate a
 * full RPC header, parse it to learn frag_len, then copy body bytes into
 * current_in_pdu; when the fragment is complete, hand it to
 * process_complete_pdu(). Returns bytes consumed, or -1 (per the elided
 * error paths — verify) on overflow/fault. */
610 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
/* Never accept more than fits in the max-sized PDU staging buffer. */
612 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
613 - p->in_data.pdu_received_len);
615 DEBUG(10, ("process_incoming_data: Start: pdu_received_len = %u, "
616 "pdu_needed_len = %u, incoming data = %u\n",
617 (unsigned int)p->in_data.pdu_received_len,
618 (unsigned int)p->in_data.pdu_needed_len,
621 if(data_to_copy == 0) {
623 * This is an error - data is being received and there is no
624 * space in the PDU. Free the received data and go into the
627 DEBUG(0, ("process_incoming_data: "
628 "No space in incoming pdu buffer. "
629 "Current size = %u incoming data size = %u\n",
630 (unsigned int)p->in_data.pdu_received_len,
632 set_incoming_fault(p);
637 * If we have no data already, wait until we get at least
638 * a RPC_HEADER_LEN * number of bytes before we can do anything.
641 if ((p->in_data.pdu_needed_len == 0) &&
642 (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
644 * Always return here. If we have more data then the RPC_HEADER
645 * will be processed the next time around the loop.
647 return fill_rpc_header(p, data, data_to_copy);
651 * At this point we know we have at least an RPC_HEADER_LEN amount of
652 * data * stored in current_in_pdu.
656 * If pdu_needed_len is zero this is a new pdu.
657 * Unmarshall the header so we know how much more
658 * data we need, then loop again.
661 if(p->in_data.pdu_needed_len == 0) {
662 ssize_t rret = unmarshall_rpc_header(p);
663 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
666 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU
667 * that consists of an RPC_HEADER only. This is a
668 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
669 * DCERPC_PKT_ORPHANED pdu type.
670 * Deal with this in process_complete_pdu(). */
674 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
675 * Keep reading until we have a full pdu.
678 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
681 * Copy as much of the data as we need into the current_in_pdu buffer.
682 * pdu_needed_len becomes zero when we have a complete pdu.
685 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
687 p->in_data.pdu_received_len += data_to_copy;
688 p->in_data.pdu_needed_len -= data_to_copy;
691 * Do we have a complete PDU ?
692 * (return the number of bytes handled in the call)
695 if(p->in_data.pdu_needed_len == 0) {
696 process_complete_pdu(p);
700 DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
701 "pdu_received_len = %u, pdu_needed_len = %u\n",
702 (unsigned int)p->in_data.pdu_received_len,
703 (unsigned int)p->in_data.pdu_needed_len));
705 return (ssize_t)data_to_copy;
708 /****************************************************************************
709 Accepts incoming data on an internal rpc pipe.
710 ****************************************************************************/
/* Feed 'n' bytes of client data into the pipe, looping over
 * process_incoming_data() until everything is consumed. Presumably returns
 * n on success / -1 on error — the loop header, error check and return are
 * elided in this extract, so confirm against upstream. */
712 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
714 size_t data_left = n;
719 DEBUG(10, ("write_to_pipe: data_left = %u\n",
720 (unsigned int)data_left));
722 data_used = process_incoming_data(p, data, data_left);
724 DEBUG(10, ("write_to_pipe: data_used = %d\n",
/* Advance past the bytes the PDU machinery consumed this iteration. */
731 data_left -= data_used;
738 /****************************************************************************
739 Replies to a request to read data from a pipe.
741 Headers are interspersed with the data at PDU intervals. By the time
742 this function is called, the start of the data could possibly have been
743 read by an SMBtrans (file_offset != 0).
745 Calling create_rpc_reply() here is a hack. The data should already
746 have been prepared into arrays of headers + data stream sections.
747 ****************************************************************************/
/* Return up to n reply bytes to the client. Order of service:
 *  1. remaining bytes of the current outgoing fragment (out_data.frag);
 *  2. if the fragment is exhausted and reply data remains in out_data.rdata,
 *     build the next PDU via create_next_pdu() and return its head.
 * Sets *is_data_outstanding when more than n bytes are pending; frees the
 * per-request context once both streams are fully drained.
 * NOTE(review): the open-pipe guard, fault-state early-outs and several
 * returns are elided in this extract. */
749 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
750 size_t n, bool *is_data_outstanding)
752 uint32 pdu_remaining = 0;
753 ssize_t data_returned = 0;
756 DEBUG(0,("read_from_pipe: pipe not open\n"));
760 DEBUG(6,(" name: %s len: %u\n",
761 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
765 * We cannot return more than one PDU length per
770 * This condition should result in the connection being closed.
771 * Netapp filers seem to set it to 0xffff which results in domain
772 * authentications failing. Just ignore it so things work.
/* Clamp oversize reads instead of erroring (NetApp interop, see above). */
775 if(n > RPC_MAX_PDU_FRAG_LEN) {
776 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
777 "pipe %s. We can only service %d sized reads.\n",
779 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
780 RPC_MAX_PDU_FRAG_LEN ));
781 n = RPC_MAX_PDU_FRAG_LEN;
785 * Determine if there is still data to send in the
786 * pipe PDU buffer. Always send this first. Never
787 * send more than is left in the current PDU. The
788 * client should send a new read request for a new
792 pdu_remaining = prs_offset(&p->out_data.frag)
793 - p->out_data.current_pdu_sent;
795 if (pdu_remaining > 0) {
796 data_returned = (ssize_t)MIN(n, pdu_remaining);
798 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
799 "current_pdu_sent = %u returning %d bytes.\n",
800 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
801 (unsigned int)prs_offset(&p->out_data.frag),
802 (unsigned int)p->out_data.current_pdu_sent,
803 (int)data_returned));
/* Copy from the fragment at the resume point and advance it. */
806 prs_data_p(&p->out_data.frag)
807 + p->out_data.current_pdu_sent,
810 p->out_data.current_pdu_sent += (uint32)data_returned;
815 * At this point p->current_pdu_len == p->current_pdu_sent (which
816 * may of course be zero if this is the first return fragment.
819 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
820 "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
821 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
823 (unsigned int)p->out_data.data_sent_length,
824 (unsigned int)prs_offset(&p->out_data.rdata) ));
826 if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
828 * We have sent all possible data, return 0.
835 * We need to create a new PDU from the data left in p->rdata.
836 * Create the header/data/footers. This also sets up the fields
837 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
838 * and stores the outgoing PDU in p->current_pdu.
841 if(!create_next_pdu(p)) {
842 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
843 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
847 data_returned = MIN(n, prs_offset(&p->out_data.frag));
849 memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
850 p->out_data.current_pdu_sent += (uint32)data_returned;
/* Tell the caller whether another read is needed for this PDU. */
853 (*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;
855 if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
856 /* We've returned everything in the out_data.frag
857 * so we're done with this pdu. Free it and reset
858 * current_pdu_sent. */
859 p->out_data.current_pdu_sent = 0;
860 prs_mem_free(&p->out_data.frag);
862 if (p->out_data.data_sent_length
863 >= prs_offset(&p->out_data.rdata)) {
865 * We're completely finished with both outgoing and
866 * incoming data streams. It's safe to free all
867 * temporary data from this request.
869 free_pipe_context(p);
873 return data_returned;
/* True if the fsp is a fake-file handle backing a named pipe (internal or
 * proxied). Safely handles NULL fsp / NULL fake_file_handle. */
876 bool fsp_is_np(struct files_struct *fsp)
878 enum FAKE_FILE_TYPE type;
880 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
884 type = fsp->fake_file_handle->type;
886 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
887 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
/* Per-handle state for a named pipe proxied to an external RPC daemon over
 * an npa tstream. The read/write tevent queues serialise concurrent I/O.
 * NOTE(review): at least one leading field appears elided in this extract. */
890 struct np_proxy_state {
892 uint16_t device_state;
893 uint64_t allocation_size;
894 struct tstream_context *npipe;
895 struct tevent_queue *read_queue;
896 struct tevent_queue *write_queue;
/* Connect to an external RPC pipe server over a unix-domain npa socket
 * (<socket_dir>/np), passing the client identity as a SamInfo3 blob.
 * Synchronously drives tstream_npa_connect_send/_recv on a private tevent
 * context. Returns the new np_proxy_state (talloc'd on mem_ctx) or NULL.
 * NOTE(review): declarations, several 'fail'/'return' lines and the success
 * return are elided in this extract — verify against upstream. */
899 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
900 const char *pipe_name,
901 const struct tsocket_address *local_address,
902 const struct tsocket_address *remote_address,
903 struct auth_serversupplied_info *server_info)
905 struct np_proxy_state *result;
907 const char *socket_dir;
908 struct tevent_context *ev;
909 struct tevent_req *subreq;
910 struct netr_SamInfo3 *info3;
916 result = talloc(mem_ctx, struct np_proxy_state);
917 if (result == NULL) {
918 DEBUG(0, ("talloc failed\n"));
922 result->read_queue = tevent_queue_create(result, "np_read");
923 if (result->read_queue == NULL) {
924 DEBUG(0, ("tevent_queue_create failed\n"));
928 result->write_queue = tevent_queue_create(result, "np_write");
929 if (result->write_queue == NULL) {
930 DEBUG(0, ("tevent_queue_create failed\n"));
/* Short-lived event context just to drive the connect synchronously. */
934 ev = s3_tevent_context_init(talloc_tos());
936 DEBUG(0, ("s3_tevent_context_init failed\n"));
940 socket_dir = lp_parm_const_string(
941 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
942 get_dyn_NCALRPCDIR());
943 if (socket_dir == NULL) {
/* Typo in the original message ("externan") — kept verbatim. */
944 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
947 socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
948 if (socket_np_dir == NULL) {
949 DEBUG(0, ("talloc_asprintf failed\n"));
953 info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
955 DEBUG(0, ("talloc failed\n"));
/* Convert the authenticated user into the SamInfo3 wire identity. */
959 status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
960 if (!NT_STATUS_IS_OK(status)) {
962 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
968 subreq = tstream_npa_connect_send(talloc_tos(), ev,
971 remote_address, /* client_addr */
972 NULL, /* client_name */
973 local_address, /* server_addr */
974 NULL, /* server_name */
976 server_info->user_session_key,
977 data_blob_null /* delegated_creds */);
978 if (subreq == NULL) {
980 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
981 "user %s\\%s failed\n",
982 socket_np_dir, pipe_name, info3->base.domain.string,
983 info3->base.account_name.string));
/* Block until the async connect completes. */
986 ok = tevent_req_poll(subreq, ev);
989 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
990 "failed for tstream_npa_connect: %s\n",
991 socket_np_dir, pipe_name, info3->base.domain.string,
992 info3->base.account_name.string,
997 ret = tstream_npa_connect_recv(subreq, &sys_errno,
1001 &result->device_state,
1002 &result->allocation_size);
1003 TALLOC_FREE(subreq);
1005 DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
1006 "user %s\\%s failed: %s\n",
1007 socket_np_dir, pipe_name, info3->base.domain.string,
1008 info3->base.account_name.string,
1009 strerror(sys_errno)));
/* Common failure exit: frees the half-built state. */
1016 TALLOC_FREE(result);
/* Open the named pipe 'name' for the given user, producing a fake_file
 * handle. Pipes listed in the "np:proxy" parameter are proxied to an
 * external daemon (make_external_rpc_pipe_p); all others must be known
 * internal pipes and get an in-process pipes_struct. Returns
 * NT_STATUS_OBJECT_NAME_NOT_FOUND for unknown pipes and
 * NT_STATUS_PIPE_NOT_AVAILABLE when backend creation fails. */
1020 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1021 const struct tsocket_address *local_address,
1022 const struct tsocket_address *remote_address,
1023 struct auth_serversupplied_info *server_info,
1024 struct fake_file_handle **phandle)
1026 const char **proxy_list;
1027 struct fake_file_handle *handle;
1029 proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1031 handle = talloc(mem_ctx, struct fake_file_handle);
1032 if (handle == NULL) {
1033 return NT_STATUS_NO_MEMORY;
/* Proxied pipe: hand off to the external RPC pipe daemon. */
1036 if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1037 struct np_proxy_state *p;
1039 p = make_external_rpc_pipe_p(handle, name,
1044 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1045 handle->private_data = p;
1047 struct pipes_struct *p;
1048 struct ndr_syntax_id syntax;
1049 const char *client_address;
1051 if (!is_known_pipename(name, &syntax)) {
1052 TALLOC_FREE(handle);
1053 return NT_STATUS_OBJECT_NAME_NOT_FOUND;
/* Internal pipes want a printable client IP; empty for non-inet. */
1056 if (tsocket_address_is_inet(remote_address, "ip")) {
1057 client_address = tsocket_address_inet_addr_string(
1060 if (client_address == NULL) {
1061 TALLOC_FREE(handle);
1062 return NT_STATUS_NO_MEMORY;
1065 client_address = "";
1068 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1071 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1072 handle->private_data = p;
/* Either backend constructor may have failed and returned NULL. */
1075 if (handle->private_data == NULL) {
1076 TALLOC_FREE(handle);
1077 return NT_STATUS_PIPE_NOT_AVAILABLE;
1082 return NT_STATUS_OK;
/* True if a read is currently outstanding on the handle. Internal pipes
 * never queue reads (the elided branch presumably returns false); proxied
 * pipes report a non-empty read queue. */
1085 bool np_read_in_progress(struct fake_file_handle *handle)
1087 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1091 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1092 struct np_proxy_state *p = talloc_get_type_abort(
1093 handle->private_data, struct np_proxy_state);
1096 read_count = tevent_queue_length(p->read_queue);
1097 if (read_count > 0) {
/* Async state for np_write_send/_recv: remembers the proxy state and the
 * byte count written. NOTE(review): the iov/nwritten fields used later are
 * elided in this extract. */
1107 struct np_write_state {
1108 struct event_context *ev;
1109 struct np_proxy_state *p;
/* Completion callback for the proxied writev (defined below). */
1114 static void np_write_done(struct tevent_req *subreq);
/* Start an async write of len bytes to the pipe handle. Internal pipes are
 * written synchronously via write_to_internal_pipe() and the request is
 * posted immediately; proxied pipes queue a tstream writev completed by
 * np_write_done(). Unknown handle types post NT_STATUS_INVALID_HANDLE. */
1116 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1117 struct fake_file_handle *handle,
1118 const uint8_t *data, size_t len)
1120 struct tevent_req *req;
1121 struct np_write_state *state;
1124 DEBUG(6, ("np_write_send: len: %d\n", (int)len));
1125 dump_data(50, data, len);
1127 req = tevent_req_create(mem_ctx, &state, struct np_write_state);
1133 state->nwritten = 0;
1134 status = NT_STATUS_OK;
1138 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1139 struct pipes_struct *p = talloc_get_type_abort(
1140 handle->private_data, struct pipes_struct);
/* Internal pipe: synchronous write, result mapped to NTSTATUS. */
1142 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
1144 status = (state->nwritten >= 0)
1145 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1149 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1150 struct np_proxy_state *p = talloc_get_type_abort(
1151 handle->private_data, struct np_proxy_state);
1152 struct tevent_req *subreq;
1156 state->iov.iov_base = CONST_DISCARD(void *, data);
1157 state->iov.iov_len = len;
/* Proxy pipe: queue the writev behind earlier writes. */
1159 subreq = tstream_writev_queue_send(state, ev,
1163 if (subreq == NULL) {
1166 tevent_req_set_callback(subreq, np_write_done, req);
1170 status = NT_STATUS_INVALID_HANDLE;
/* Synchronous outcome: complete the request and post it. */
1172 if (NT_STATUS_IS_OK(status)) {
1173 tevent_req_done(req);
1175 tevent_req_nterror(req, status);
1177 return tevent_req_post(req, ev);
/* Completion callback for the proxied writev: record bytes written (or map
 * the unix error to NTSTATUS) and finish the np_write request. */
1183 static void np_write_done(struct tevent_req *subreq)
1185 struct tevent_req *req = tevent_req_callback_data(
1186 subreq, struct tevent_req);
1187 struct np_write_state *state = tevent_req_data(
1188 req, struct np_write_state);
1192 received = tstream_writev_queue_recv(subreq, &err);
1194 tevent_req_nterror(req, map_nt_error_from_unix(err));
1197 state->nwritten = received;
1198 tevent_req_done(req);
/* Collect the result of np_write_send: returns the request's NTSTATUS and,
 * on success, stores the byte count in *pnwritten. */
1201 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1203 struct np_write_state *state = tevent_req_data(
1204 req, struct np_write_state);
1207 if (tevent_req_is_nterror(req, &status)) {
1210 *pnwritten = state->nwritten;
1211 return NT_STATUS_OK;
/* Cursor state for the incremental readv below: destination buffer, fill
 * offset, capped length, and bytes still pending on the stream.
 * NOTE(review): the field declarations are elided in this extract. */
1214 struct np_ipc_readv_next_vector_state {
/* Initialise the cursor; the read is capped at UINT16_MAX bytes to match
 * SMB named-pipe read semantics. */
1221 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
1222 uint8_t *buf, size_t len)
1227 s->len = MIN(len, UINT16_MAX);
/* tstream_readv_pdu "next vector" callback: supply the next single-element
 * iovec to read into, implementing "return a short read once at least one
 * byte has arrived and nothing more is pending". Records leftover pending
 * bytes in state->remaining so callers can flag outstanding data.
 * NOTE(review): the 'wanted' computation and several returns are elided. */
1230 static int np_ipc_readv_next_vector(struct tstream_context *stream,
1232 TALLOC_CTX *mem_ctx,
1233 struct iovec **_vector,
1236 struct np_ipc_readv_next_vector_state *state =
1237 (struct np_ipc_readv_next_vector_state *)private_data;
1238 struct iovec *vector;
/* Buffer full: no more vectors, read is complete. */
1242 if (state->ofs == state->len) {
1248 pending = tstream_pending_bytes(stream);
1249 if (pending == -1) {
1253 if (pending == 0 && state->ofs != 0) {
1254 /* return a short read */
1261 /* we want at least one byte and recheck again */
1264 size_t missing = state->len - state->ofs;
1265 if (pending > missing) {
1266 /* there's more available */
1267 state->remaining = pending - missing;
1270 /* read what we can get and recheck in the next cycle */
1275 vector = talloc_array(mem_ctx, struct iovec, 1);
1280 vector[0].iov_base = state->buf + state->ofs;
1281 vector[0].iov_len = wanted;
1283 state->ofs += wanted;
/* Async state for np_read_send/_recv: readv cursor, byte count read, and
 * whether more data remains on the stream after this read. */
1290 struct np_read_state {
1291 struct np_proxy_state *p;
1292 struct np_ipc_readv_next_vector_state next_vector;
1295 bool is_data_outstanding;
/* Completion callback for the proxied readv (defined below). */
1298 static void np_read_done(struct tevent_req *subreq);
/* Start an async read of up to len bytes from the pipe handle. Internal
 * pipes are served synchronously via read_from_internal_pipe() and posted
 * immediately; proxied pipes queue a readv-pdu completed by np_read_done().
 * Unknown handle types post NT_STATUS_INVALID_HANDLE. */
1300 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1301 struct fake_file_handle *handle,
1302 uint8_t *data, size_t len)
1304 struct tevent_req *req;
1305 struct np_read_state *state;
1308 req = tevent_req_create(mem_ctx, &state, struct np_read_state);
1313 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1314 struct pipes_struct *p = talloc_get_type_abort(
1315 handle->private_data, struct pipes_struct);
/* Internal pipe: synchronous read, result mapped to NTSTATUS. */
1317 state->nread = read_from_internal_pipe(
1318 p, (char *)data, len, &state->is_data_outstanding);
1320 status = (state->nread >= 0)
1321 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1325 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1326 struct np_proxy_state *p = talloc_get_type_abort(
1327 handle->private_data, struct np_proxy_state);
1328 struct tevent_req *subreq;
1330 np_ipc_readv_next_vector_init(&state->next_vector,
/* Proxy pipe: queue the readv-pdu behind earlier reads. */
1333 subreq = tstream_readv_pdu_queue_send(state,
1337 np_ipc_readv_next_vector,
1338 &state->next_vector);
1339 if (subreq == NULL) {
1342 tevent_req_set_callback(subreq, np_read_done, req);
1346 status = NT_STATUS_INVALID_HANDLE;
/* Synchronous outcome: complete the request and post it. */
1348 if (NT_STATUS_IS_OK(status)) {
1349 tevent_req_done(req);
1351 tevent_req_nterror(req, status);
1353 return tevent_req_post(req, ev);
/* Completion callback for the proxied readv: map unix errors to NTSTATUS,
 * note whether the stream still has pending bytes, and finish the request.
 * NOTE(review): the line storing 'ret' into state->nread is elided. */
1356 static void np_read_done(struct tevent_req *subreq)
1358 struct tevent_req *req = tevent_req_callback_data(
1359 subreq, struct tevent_req);
1360 struct np_read_state *state = tevent_req_data(
1361 req, struct np_read_state);
1365 ret = tstream_readv_pdu_queue_recv(subreq, &err);
1366 TALLOC_FREE(subreq);
1368 tevent_req_nterror(req, map_nt_error_from_unix(err));
/* More bytes left on the wire means the caller should read again. */
1373 state->is_data_outstanding = (state->next_vector.remaining > 0);
1375 tevent_req_done(req);
/* Collect the result of np_read_send: returns the request's NTSTATUS and,
 * on success, stores the byte count and the outstanding-data flag. */
1379 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1380 bool *is_data_outstanding)
1382 struct np_read_state *state = tevent_req_data(
1383 req, struct np_read_state);
1386 if (tevent_req_is_nterror(req, &status)) {
1389 *nread = state->nread;
1390 *is_data_outstanding = state->is_data_outstanding;
1391 return NT_STATUS_OK;
1395 * @brief Create a new RPC client context which uses a local dispatch function.
1397 * @param[in] conn The connection struct that will hold the pipe
1399 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
1401 * @return NT_STATUS_OK on success, a corresponding NT status if an
/* Lazily open (and cache on conn) an internal spoolss RPC pipe; subsequent
 * calls reuse conn->spoolss_pipe.
 * NOTE(review): the closing brace and an error return are beyond this
 * extract's view. */
1404 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
1405 struct rpc_pipe_client **spoolss_pipe)
1409 /* TODO: check and handle disconnections */
1411 if (!conn->spoolss_pipe) {
1412 status = rpc_pipe_open_internal(conn,
1413 &ndr_table_spoolss.syntax_id,
1415 &conn->spoolss_pipe);
1416 if (!NT_STATUS_IS_OK(status)) {
1421 *spoolss_pipe = conn->spoolss_pipe;
1422 return NT_STATUS_OK;