2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
28 #define DBGC_CLASS DBGC_RPC_SRV
30 /****************************************************************************
31 Initialise an outgoing packet.
32 ****************************************************************************/
34 static bool pipe_init_outgoing_data(pipes_struct *p)
36 output_data *o_data = &p->out_data;
38 /* Reset the offset counters. */
39 o_data->data_sent_length = 0;
40 o_data->current_pdu_sent = 0;
42 prs_mem_free(&o_data->frag);
44 /* Free any memory in the current return data buffer. */
45 prs_mem_free(&o_data->rdata);
48 * Initialize the outgoing RPC data buffer.
49 * we will use this as the raw data area for replying to rpc requests.
51 if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
52 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
59 /****************************************************************************
60 Sets the fault state on incoming packets.
61 ****************************************************************************/
63 static void set_incoming_fault(pipes_struct *p)
65 prs_mem_free(&p->in_data.data);
66 p->in_data.pdu_needed_len = 0;
67 p->in_data.pdu_received_len = 0;
68 p->fault_state = True;
69 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
70 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
73 /****************************************************************************
74 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
75 ****************************************************************************/
77 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
79 size_t len_needed_to_complete_hdr =
80 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
82 DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
83 "len_needed_to_complete_hdr = %u, "
85 (unsigned int)data_to_copy,
86 (unsigned int)len_needed_to_complete_hdr,
87 (unsigned int)p->in_data.pdu_received_len ));
89 if (p->in_data.current_in_pdu == NULL) {
90 p->in_data.current_in_pdu = talloc_array(p, uint8_t,
93 if (p->in_data.current_in_pdu == NULL) {
94 DEBUG(0, ("talloc failed\n"));
98 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
99 data, len_needed_to_complete_hdr);
100 p->in_data.pdu_received_len += len_needed_to_complete_hdr;
102 return (ssize_t)len_needed_to_complete_hdr;
/****************************************************************************
 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
****************************************************************************/

/*
 * Parse the RPC_HEADER_LEN bytes already collected in
 * p->in_data.current_in_pdu into p->hdr, validate version/flags/length,
 * record the stream endianness on the first PDU, and set
 * p->in_data.pdu_needed_len to the number of body bytes still required.
 * Returns 0 on success (no extra input consumed); errors fault the pipe.
 *
 * NOTE(review): this excerpt is truncated — the local "prs_struct rpc_in;"
 * declaration, the "return -1;" error statements, the "} else {" between
 * the first-PDU and subsequent-PDU branches, and several closing braces
 * are missing. Code is kept byte-for-byte; comments only added.
 */
static ssize_t unmarshall_rpc_header(pipes_struct *p)
	/*
	 * Unmarshall the header to determine the needed length.
	 */

	/* Caller must have collected exactly one header's worth of bytes. */
	if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
		DEBUG(0, ("unmarshall_rpc_header: "
			  "assert on rpc header length failed.\n"));
		set_incoming_fault(p);

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
	prs_set_endian_data( &rpc_in, p->endian);

	/* Parse in place; False = prs does not take ownership of the buffer. */
	prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
			 p->in_data.pdu_received_len, False);

	/*
	 * Unmarshall the header as this will tell us how much
	 * data we need to read to get the complete pdu.
	 * This also sets the endian flag in rpc_in.
	 */
	if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
		DEBUG(0, ("unmarshall_rpc_header: "
			  "failed to unmarshall RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);

	/*
	 * Validate the RPC header.
	 */
	/* NOTE(review): with '&&' a packet is only rejected when BOTH the
	 * major and minor version fields are wrong (e.g. major==4,minor==0
	 * passes); '||' looks intended — confirm before changing, this is
	 * long-standing behaviour. */
	if(p->hdr.major != 5 && p->hdr.minor != 0) {
		DEBUG(0, ("unmarshall_rpc_header: "
			  "invalid major/minor numbers in RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);

	/*
	 * If there's not data in the incoming buffer this should be the
	 * start of a new RPC.
	 */
	if(prs_offset(&p->in_data.data) == 0) {

		/*
		 * AS/U doesn't set FIRST flag in a BIND packet it seems.
		 */
		if ((p->hdr.pkt_type == DCERPC_PKT_REQUEST) &&
		    !(p->hdr.flags & DCERPC_PFC_FLAG_FIRST)) {
			/*
			 * Ensure that the FIRST flag is set.
			 * If not then we have a stream missmatch.
			 */
			DEBUG(0, ("unmarshall_rpc_header: "
				  "FIRST flag not set in first PDU !\n"));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);

		/*
		 * If this is the first PDU then set the endianness
		 * flag in the pipe. We will need this when parsing all
		 * following PDUs (comment tail truncated in excerpt).
		 */
		p->endian = rpc_in.bigendian_data;

		DEBUG(5, ("unmarshall_rpc_header: using %sendian RPC\n",
			  p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

		/*
		 * If this is *NOT* the first PDU then check the endianness
		 * flag in the pipe is the same as that in the PDU.
		 */
		/* NOTE(review): the "} else {" that should precede this
		 * branch is missing from the excerpt. */
		if (p->endian != rpc_in.bigendian_data) {
			DEBUG(0, ("unmarshall_rpc_header: FIRST endianness "
				  "flag (%d) different in next PDU !\n",
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);

	/*
	 * Ensure that the pdu length is sane.
	 */
	if ((p->hdr.frag_len < RPC_HEADER_LEN) ||
	    (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);

	DEBUG(10, ("unmarshall_rpc_header: type = %u, flags = %u\n",
		   (unsigned int)p->hdr.pkt_type, (unsigned int)p->hdr.flags));

	/* Body bytes still required beyond the header we already hold. */
	p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

	prs_mem_free(&rpc_in);

	/* Grow the receive buffer so the whole fragment fits. */
	p->in_data.current_in_pdu = TALLOC_REALLOC_ARRAY(
		p, p->in_data.current_in_pdu, uint8_t, p->hdr.frag_len);
	if (p->in_data.current_in_pdu == NULL) {
		DEBUG(0, ("talloc failed\n"));
		set_incoming_fault(p);

	return 0; /* No extra data processed. */
239 /****************************************************************************
240 Call this to free any talloc'ed memory. Do this before and after processing
242 ****************************************************************************/
244 static void free_pipe_context(pipes_struct *p)
247 DEBUG(3, ("free_pipe_context: "
248 "destroying talloc pool of size %lu\n",
249 (unsigned long)talloc_total_size(p->mem_ctx)));
250 talloc_free_children(p->mem_ctx);
252 p->mem_ctx = talloc_named(p, 0, "pipe %s %p",
253 get_pipe_name_from_syntax(talloc_tos(),
255 if (p->mem_ctx == NULL) {
256 p->fault_state = True;
/****************************************************************************
 Processes a request pdu. This will do auth processing if needed, and
 appends the data into the complete stream if the LAST flag is not set.
****************************************************************************/

/*
 * Handle one DCERPC_PKT_REQUEST fragment: verify the pipe is bound,
 * parse the request header, verify/unseal the payload per the negotiated
 * auth type, append the payload to the accumulated request stream, and —
 * when the LAST flag is set — dispatch the complete call via
 * api_pipe_request(). Returns True on success (presumed; see note).
 *
 * NOTE(review): this excerpt is truncated — the "if (!p->pipe_bound)"
 * guard, the remaining subtrahends of the data_len computation, the
 * auth-call argument lines, "break;"s, "return False;/True;" statements
 * and closing braces are missing. Code kept byte-for-byte; comments only.
 */
static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
	uint32 ss_padding_len = 0;
	/* Payload length = fragment length minus headers and auth trailer.
	 * NOTE(review): further "- ..." terms are missing from the excerpt. */
	size_t data_len = p->hdr.frag_len
		- (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0)

	/* Requests are invalid before a successful bind. */
	DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
	set_incoming_fault(p);

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */
	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);

	/* Verify signatures / unseal according to the negotiated auth. */
	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
			if (!api_pipe_ntlmssp_auth_process(p, rpc_in_p,
				DEBUG(0, ("process_request_pdu: "
					  "failed to do auth processing.\n"));
				DEBUG(0, ("process_request_pdu: error is %s\n",
				set_incoming_fault(p);

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p,
				DEBUG(3, ("process_request_pdu: "
					  "failed to do schannel processing.\n"));
				set_incoming_fault(p);

			/* default: unknown/unsupported auth → fault. */
			DEBUG(0, ("process_request_pdu: "
				  "unknown auth type %u set.\n",
				  (unsigned int)p->auth.auth_type));
			set_incoming_fault(p);

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
	 */
	if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
		DEBUG(0, ("process_request_pdu: "
			  "rpc data buffer too large (%u) + (%u)\n",
			  (unsigned int)prs_data_size(&p->in_data.data),
			  (unsigned int)data_len ));
		set_incoming_fault(p);

	/*
	 * Append the data portion into the buffer and return.
	 */
	if (!prs_append_some_prs_data(&p->in_data.data, rpc_in_p,
				      prs_offset(rpc_in_p), data_len)) {
		DEBUG(0, ("process_request_pdu: Unable to append data size %u "
			  "to parse buffer of size %u.\n",
			  (unsigned int)data_len,
			  (unsigned int)prs_data_size(&p->in_data.data)));
		set_incoming_fault(p);

	/* LAST fragment: the full request stream is assembled — run it. */
	if(p->hdr.flags & DCERPC_PFC_FLAG_LAST) {
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */
		if (!prs_set_buffer_size(&p->in_data.data,
					 prs_offset(&p->in_data.data))) {
			DEBUG(0, ("process_request_pdu: "
				  "Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */
		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */
		free_pipe_context(p);

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);

		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU. Not sure which
		 * is best here.... JRA.
		 */
		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
/****************************************************************************
 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 already been parsed and stored in p->hdr.
****************************************************************************/

/*
 * Dispatch one fully-received PDU by packet type: REQUEST fragments go to
 * process_request_pdu(); BIND / ALTER / AUTH3 get their dedicated
 * handlers; connectionless and server-to-client types are rejected.
 * On handler failure the pipe is faulted and a fault PDU is queued.
 * Finally the receive counters are reset for the next PDU.
 *
 * NOTE(review): this excerpt is truncated — the "prs_struct rpc_in;" and
 * "bool reply = False;" declarations, the "if (p->fault_state)" guard,
 * "break;" statements, the "default:" label, the "if (!reply)" guard and
 * several "return"s/braces are missing. Code kept byte-for-byte.
 */
static void process_complete_pdu(pipes_struct *p)
	size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
	char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];

	/* Already faulted: reject immediately with a fault PDU. */
	DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
	set_incoming_fault(p);
	setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

	/*
	 * Ensure we're using the corrent endianness for both the
	 * RPC header flags and the raw data we will be reading from.
	 */
	prs_set_endian_data( &rpc_in, p->endian);
	prs_set_endian_data( &p->in_data.data, p->endian);

	/* Parse the body in place; False = prs does not own the buffer. */
	prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

	DEBUG(10,("process_complete_pdu: processing packet type %u\n",
		  (unsigned int)p->hdr.pkt_type ));

	switch (p->hdr.pkt_type) {
		case DCERPC_PKT_REQUEST:
			reply = process_request_pdu(p, &rpc_in);

		case DCERPC_PKT_PING: /* CL request - ignore... */
			DEBUG(0, ("process_complete_pdu: Error. "
				  "Connectionless packet type %u received on "
				  "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
				  get_pipe_name_from_syntax(talloc_tos(),

		case DCERPC_PKT_RESPONSE: /* No responses here. */
			DEBUG(0, ("process_complete_pdu: Error. "
				  "DCERPC_PKT_RESPONSE received from client "
				  get_pipe_name_from_syntax(talloc_tos(),

		case DCERPC_PKT_FAULT:
		case DCERPC_PKT_WORKING:
			/* CL request - reply to a ping when a call in process. */
		case DCERPC_PKT_NOCALL:
			/* CL - server reply to a ping call. */
		case DCERPC_PKT_REJECT:
		case DCERPC_PKT_CL_CANCEL:
		case DCERPC_PKT_FACK:
		case DCERPC_PKT_CANCEL_ACK:
			DEBUG(0, ("process_complete_pdu: Error. "
				  "Connectionless packet type %u received on "
				  "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
				  get_pipe_name_from_syntax(talloc_tos(),

		case DCERPC_PKT_BIND:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_req(p, &rpc_in);

		case DCERPC_PKT_BIND_ACK:
		case DCERPC_PKT_BIND_NAK:
			DEBUG(0, ("process_complete_pdu: Error. "
				  "DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK "
				  "packet type %u received on pipe %s.\n",
				  (unsigned int)p->hdr.pkt_type,
				  get_pipe_name_from_syntax(talloc_tos(),

		case DCERPC_PKT_ALTER:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_alter_context(p, &rpc_in);

		case DCERPC_PKT_ALTER_RESP:
			DEBUG(0, ("process_complete_pdu: Error. "
				  "DCERPC_PKT_ALTER_RESP on pipe %s: "
				  "Should only be server -> client.\n",
				  get_pipe_name_from_syntax(talloc_tos(),

		case DCERPC_PKT_AUTH3:
			/*
			 * The third packet in an NTLMSSP auth exchange.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_auth3(p, &rpc_in);

		case DCERPC_PKT_SHUTDOWN:
			DEBUG(0, ("process_complete_pdu: Error. "
				  "DCERPC_PKT_SHUTDOWN on pipe %s: "
				  "Should only be server -> client.\n",
				  get_pipe_name_from_syntax(talloc_tos(),

		case DCERPC_PKT_CO_CANCEL:
			/* For now just free all client data and continue
			 * processing. (comment truncated in excerpt) */
			DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
				 " Abandoning rpc call.\n"));
			/* As we never do asynchronous RPC serving, we can
			 * never cancel a call (as far as I know).
			 * If we ever did we'd have to send a cancel_ack reply.
			 * For now, just free all client data and continue
			 * processing. (comment truncated in excerpt) */

			/* Enable this if we're doing async rpc. */
			/* We must check the outstanding callid matches. */
			if(pipe_init_outgoing_data(p)) {
				/* Send a cancel_ack PDU reply. */
				/* We should probably check the auth-verifier here. */
				reply = setup_cancel_ack_reply(p, &rpc_in);

		case DCERPC_PKT_ORPHANED:
			/* We should probably check the auth-verifier here.
			 * For now just free all client data and continue
			 * processing. (comment truncated in excerpt) */
			DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
				  " Abandoning rpc call.\n"));

			/* default: (label missing from excerpt) */
			DEBUG(0, ("process_complete_pdu: "
				  "Unknown rpc type = %u received.\n",
				  (unsigned int)p->hdr.pkt_type));

	/* Reset to little endian.
	 * Probably don't need this but it won't hurt. */
	prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

	/* Handler failed: fault the pipe and queue a fault PDU.
	 * NOTE(review): enclosing "if (!reply)" is missing from excerpt. */
	DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
		 "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
	set_incoming_fault(p);
	setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
	prs_mem_free(&rpc_in);

	/*
	 * Reset the lengths. We're ready for a new pdu.
	 */
	TALLOC_FREE(p->in_data.current_in_pdu);
	p->in_data.pdu_needed_len = 0;
	p->in_data.pdu_received_len = 0;

	prs_mem_free(&rpc_in);
/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

/*
 * Consume up to "n" bytes of client data for pipe "p":
 *   1. collect RPC_HEADER_LEN header bytes (fill_rpc_header),
 *   2. parse the header to learn the fragment size (unmarshall_rpc_header),
 *   3. buffer body bytes until the fragment is complete,
 *   4. dispatch the complete PDU (process_complete_pdu).
 * Returns the number of bytes consumed from "data", or -1 on error
 * (presumed; the error returns are missing from this excerpt).
 *
 * NOTE(review): truncated — "return -1;", "return rret;", the memcpy
 * source arguments and closing braces are missing. Code kept as-is.
 */
static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
	/* Never accept more than the PDU buffer can still hold. */
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
				  - p->in_data.pdu_received_len);

	DEBUG(10, ("process_incoming_data: Start: pdu_received_len = %u, "
		   "pdu_needed_len = %u, incoming data = %u\n",
		   (unsigned int)p->in_data.pdu_received_len,
		   (unsigned int)p->in_data.pdu_needed_len,

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the
		 * fault state (comment truncated in excerpt).
		 */
		DEBUG(0, ("process_incoming_data: "
			  "No space in incoming pdu buffer. "
			  "Current size = %u incoming data size = %u\n",
			  (unsigned int)p->in_data.pdu_received_len,
		set_incoming_fault(p);

	/*
	 * If we have no data already, wait until we get at least
	 * a RPC_HEADER_LEN * number of bytes before we can do anything.
	 */
	if ((p->in_data.pdu_needed_len == 0) &&
	    (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of
	 * data * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */
	if(p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU
		 * that consists of an RPC_HEADER only. This is a
		 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
		 * DCERPC_PKT_ORPHANED pdu type.
		 * Deal with this in process_complete_pdu(). */

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */
	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */
	memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */
	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);

	DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
		   "pdu_received_len = %u, pdu_needed_len = %u\n",
		   (unsigned int)p->in_data.pdu_received_len,
		   (unsigned int)p->in_data.pdu_needed_len));

	return (ssize_t)data_to_copy;
/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

/*
 * Feed "n" bytes of client data into the internal pipe, delegating to
 * process_incoming_data() until everything is consumed. Returns the
 * number of bytes written, or -1 on error (presumed from the caller's
 * "nwritten >= 0" check — confirm).
 *
 * NOTE(review): truncated — the surrounding while-loop, the data_used
 * declaration, the error check and the final return are missing from
 * this excerpt. Code kept byte-for-byte; comments only.
 */
static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
	size_t data_left = n;

	DEBUG(10, ("write_to_pipe: data_left = %u\n",
		   (unsigned int)data_left));

	data_used = process_incoming_data(p, data, data_left);

	DEBUG(10, ("write_to_pipe: data_used = %d\n",

	data_left -= data_used;
/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

/*
 * Copy up to "n" reply bytes into "data". Any partially-sent fragment in
 * out_data.frag is drained first; otherwise the next PDU is built from
 * out_data.rdata via create_next_pdu(). *is_data_outstanding is set when
 * more than "n" bytes remain in the freshly built fragment.
 * Returns the number of bytes copied (0 when nothing is left), or -1 on
 * error (presumed from the caller's "nread >= 0" check — confirm).
 *
 * NOTE(review): truncated — the "if (!p ...)" open check, the memcpy
 * opening for the partial-fragment path, several returns and closing
 * braces are missing. Code kept byte-for-byte; comments only.
 */
static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
				       size_t n, bool *is_data_outstanding)
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	DEBUG(0,("read_from_pipe: pipe not open\n"));

	DEBUG(6,(" name: %s len: %u\n",
		 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),

	/*
	 * We cannot return more than one PDU length per
	 * read request (comment truncated in excerpt).
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing. Just ignore it so things work.
	 */
	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			 RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU (comment truncated in excerpt).
	 */
	pdu_remaining = prs_offset(&p->out_data.frag)
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			  (unsigned int)prs_offset(&p->out_data.frag),
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		/* NOTE(review): "memcpy(data," opener missing in excerpt. */
		prs_data_p(&p->out_data.frag)
			+ p->out_data.current_pdu_sent,

		p->out_data.current_pdu_sent += (uint32)data_returned;

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment.
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)prs_offset(&p->out_data.rdata) ));

	if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */
	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));

	data_returned = MIN(n, prs_offset(&p->out_data.frag));

	memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

	/* More of this fragment than fits in the caller's buffer? */
	(*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;

	if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		prs_mem_free(&p->out_data.frag);

	return data_returned;
874 bool fsp_is_np(struct files_struct *fsp)
876 enum FAKE_FILE_TYPE type;
878 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
882 type = fsp->fake_file_handle->type;
884 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
885 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
/*
 * Per-handle state for a named pipe proxied to an external RPC server
 * over a unix-domain npa tstream. The read/write queues serialise
 * overlapping tevent requests on the single stream.
 *
 * NOTE(review): truncated — at least one leading field (the original
 * line before device_state) and the closing "};" are missing from this
 * excerpt.
 */
struct np_proxy_state {
	uint16_t device_state;		/* as returned by tstream_npa_connect */
	uint64_t allocation_size;	/* as returned by tstream_npa_connect */
	struct tstream_context *npipe;	/* connected stream to the external server */
	struct tevent_queue *read_queue;
	struct tevent_queue *write_queue;
/*
 * Connect to an external RPC pipe server over the npa unix socket under
 * <socket_dir>/np, authenticating as "server_info" (converted to a
 * netr_SamInfo3). Returns a talloc'ed np_proxy_state on success, NULL on
 * failure (presumed; the return statements are missing from this
 * excerpt).
 *
 * NOTE(review): truncated — several local declarations (socket_np_dir,
 * status, ret, ok, sys_errno), NULL checks, "goto fail" statements, the
 * fail label and returns are missing. Code kept byte-for-byte.
 */
static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
				const char *pipe_name,
				const struct tsocket_address *local_address,
				const struct tsocket_address *remote_address,
				struct auth_serversupplied_info *server_info)
	struct np_proxy_state *result;
	const char *socket_dir;
	struct tevent_context *ev;
	struct tevent_req *subreq;
	struct netr_SamInfo3 *info3;

	result = talloc(mem_ctx, struct np_proxy_state);
	if (result == NULL) {
		DEBUG(0, ("talloc failed\n"));

	result->read_queue = tevent_queue_create(result, "np_read");
	if (result->read_queue == NULL) {
		DEBUG(0, ("tevent_queue_create failed\n"));

	result->write_queue = tevent_queue_create(result, "np_write");
	if (result->write_queue == NULL) {
		DEBUG(0, ("tevent_queue_create failed\n"));

	/* Private event context for the synchronous connect below. */
	ev = s3_tevent_context_init(talloc_tos());
		DEBUG(0, ("s3_tevent_context_init failed\n"));

	socket_dir = lp_parm_const_string(
		GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
		get_dyn_NCALRPCDIR());
	if (socket_dir == NULL) {
		/* NOTE(review): "externan" typo in this log message (runtime
		 * string — fix deliberately, not as part of comment edits). */
		DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));

	socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
	if (socket_np_dir == NULL) {
		DEBUG(0, ("talloc_asprintf failed\n"));

	info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
		DEBUG(0, ("talloc failed\n"));

	status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",

	subreq = tstream_npa_connect_send(talloc_tos(), ev,
					  remote_address, /* client_addr */
					  NULL, /* client_name */
					  local_address, /* server_addr */
					  NULL, /* server_name */
					  server_info->user_session_key,
					  data_blob_null /* delegated_creds */);
	if (subreq == NULL) {
		DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
			  "user %s\\%s failed\n",
			  socket_np_dir, pipe_name, info3->base.domain.string,
			  info3->base.account_name.string));

	/* Drive the async connect to completion on the private context. */
	ok = tevent_req_poll(subreq, ev);
		DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
			  "failed for tstream_npa_connect: %s\n",
			  socket_np_dir, pipe_name, info3->base.domain.string,
			  info3->base.account_name.string,

	ret = tstream_npa_connect_recv(subreq, &sys_errno,
				       &result->device_state,
				       &result->allocation_size);
	TALLOC_FREE(subreq);
		DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
			  "user %s\\%s failed: %s\n",
			  socket_np_dir, pipe_name, info3->base.domain.string,
			  info3->base.account_name.string,
			  strerror(sys_errno)));

	/* Failure path: free the partially-built state. */
	TALLOC_FREE(result);
/*
 * Open the named pipe "name" on behalf of "server_info", producing a fake
 * file handle in *phandle. Pipes listed in the "np:proxy" smb.conf option
 * are proxied to an external RPC server; all others are served by the
 * in-process RPC implementation (make_internal_rpc_pipe_p).
 *
 * Returns NT_STATUS_OK, NT_STATUS_NO_MEMORY,
 * NT_STATUS_OBJECT_NAME_NOT_FOUND for unknown pipe names, or
 * NT_STATUS_PIPE_NOT_AVAILABLE when the backend could not be created.
 *
 * NOTE(review): truncated — the "} else {" between the proxy and internal
 * branches, some call-argument lines, the "*phandle = handle;" assignment
 * (presumed) and closing braces are missing. Code kept byte-for-byte.
 */
NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
		 const struct tsocket_address *local_address,
		 const struct tsocket_address *remote_address,
		 struct auth_serversupplied_info *server_info,
		 struct fake_file_handle **phandle)
	const char **proxy_list;
	struct fake_file_handle *handle;

	proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

	handle = talloc(mem_ctx, struct fake_file_handle);
	if (handle == NULL) {
		return NT_STATUS_NO_MEMORY;

	/* Proxied pipe: hand off to the external RPC server. */
	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		struct np_proxy_state *p;

		p = make_external_rpc_pipe_p(handle, name,

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		handle->private_data = p;

		/* Internal pipe branch ("else" missing from excerpt). */
		struct pipes_struct *p;
		struct ndr_syntax_id syntax;
		const char *client_address;

		if (!is_known_pipename(name, &syntax)) {
			TALLOC_FREE(handle);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;

		/* Record the client IP (empty string for non-inet sockets). */
		if (tsocket_address_is_inet(remote_address, "ip")) {
			client_address = tsocket_address_inet_addr_string(

			if (client_address == NULL) {
				TALLOC_FREE(handle);
				return NT_STATUS_NO_MEMORY;

			client_address = "";

		p = make_internal_rpc_pipe_p(handle, &syntax, client_address,

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		handle->private_data = p;

	/* Either backend constructor may have failed. */
	if (handle->private_data == NULL) {
		TALLOC_FREE(handle);
		return NT_STATUS_PIPE_NOT_AVAILABLE;

	return NT_STATUS_OK;
1083 bool np_read_in_progress(struct fake_file_handle *handle)
1085 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1089 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1090 struct np_proxy_state *p = talloc_get_type_abort(
1091 handle->private_data, struct np_proxy_state);
1094 read_count = tevent_queue_length(p->read_queue);
1095 if (read_count > 0) {
/*
 * Async state for np_write_send/recv.
 *
 * NOTE(review): truncated — the remaining fields (at least the iovec for
 * the proxy path and the "ssize_t nwritten" result read by
 * np_write_recv) and the closing "};" are missing from this excerpt.
 */
struct np_write_state {
	struct event_context *ev;
	struct np_proxy_state *p;
static void np_write_done(struct tevent_req *subreq);

/*
 * Start an async write of "len" bytes to the named pipe behind "handle".
 * Internal pipes are written synchronously via write_to_internal_pipe()
 * and the request completes immediately; proxied pipes queue a
 * tstream_writev on the proxy's write queue and complete in
 * np_write_done(). Collect the result with np_write_recv().
 *
 * NOTE(review): truncated — the NULL check after tevent_req_create, the
 * zero-length early-out, "goto post_status;"/"return req;" statements,
 * some call-argument lines and closing braces are missing. Code kept
 * byte-for-byte; comments only.
 */
struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct fake_file_handle *handle,
				 const uint8_t *data, size_t len)
	struct tevent_req *req;
	struct np_write_state *state;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	req = tevent_req_create(mem_ctx, &state, struct np_write_state);

	state->nwritten = 0;
	status = NT_STATUS_OK;

	/* Internal pipe: synchronous write, completes inline. */
	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;

	/* Proxied pipe: queue an async writev on the proxy stream. */
	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		state->iov.iov_base = CONST_DISCARD(void *, data);
		state->iov.iov_len = len;

		subreq = tstream_writev_queue_send(state, ev,

		if (subreq == NULL) {

		tevent_req_set_callback(subreq, np_write_done, req);

	status = NT_STATUS_INVALID_HANDLE;

	/* Synchronous completion path (internal pipe or early error). */
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);

	tevent_req_nterror(req, status);

	return tevent_req_post(req, ev);
1181 static void np_write_done(struct tevent_req *subreq)
1183 struct tevent_req *req = tevent_req_callback_data(
1184 subreq, struct tevent_req);
1185 struct np_write_state *state = tevent_req_data(
1186 req, struct np_write_state);
1190 received = tstream_writev_queue_recv(subreq, &err);
1192 tevent_req_nterror(req, map_nt_error_from_unix(err));
1195 state->nwritten = received;
1196 tevent_req_done(req);
1199 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1201 struct np_write_state *state = tevent_req_data(
1202 req, struct np_write_state);
1205 if (tevent_req_is_nterror(req, &status)) {
1208 *pnwritten = state->nwritten;
1209 return NT_STATUS_OK;
/*
 * Cursor state for np_ipc_readv_next_vector(): tracks progress through
 * the caller's read buffer and how many bytes remain pending on the
 * stream after the buffer is satisfied.
 *
 * NOTE(review): truncated — the field declarations (at least buf, len,
 * ofs, remaining, judging by their use below) and the closing "};" are
 * missing from this excerpt, as are the init function's opening brace
 * and the assignments other than s->len.
 */
struct np_ipc_readv_next_vector_state {

/* Prepare "s" to read at most MIN(len, UINT16_MAX) bytes into "buf". */
static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
					  uint8_t *buf, size_t len)
	/* Cap a single IPC read at 64k-1, matching the SMB trans limit. */
	s->len = MIN(len, UINT16_MAX);
/*
 * tstream_readv_pdu "next vector" callback implementing short-read
 * semantics for IPC pipe reads: deliver whatever bytes are pending on
 * the stream (at least one), up to the buffer capacity, and stop at the
 * first opportunity rather than blocking for a full buffer. Records any
 * excess pending bytes in state->remaining so the caller can report
 * "data outstanding". Returns 0 on success, -1 on error (presumed; the
 * returns are missing from this excerpt).
 *
 * NOTE(review): truncated — the "void *private_data" and "size_t *count"
 * parameters, the "wanted" computation, *_vector/*count assignments,
 * returns and closing braces are missing. Code kept byte-for-byte.
 */
static int np_ipc_readv_next_vector(struct tstream_context *stream,
				    TALLOC_CTX *mem_ctx,
				    struct iovec **_vector,
	struct np_ipc_readv_next_vector_state *state =
		(struct np_ipc_readv_next_vector_state *)private_data;
	struct iovec *vector;

	/* Buffer full: stop the readv (empty vector presumed). */
	if (state->ofs == state->len) {

	pending = tstream_pending_bytes(stream);
	if (pending == -1) {

	if (pending == 0 && state->ofs != 0) {
		/* return a short read */

	/* we want at least one byte and recheck again */

	/* NOTE(review): an "else" arm with the pending>0 case is
	 * truncated around here. */
	size_t missing = state->len - state->ofs;
	if (pending > missing) {
		/* there's more available */
		state->remaining = pending - missing;

	/* read what we can get and recheck in the next cycle */

	vector = talloc_array(mem_ctx, struct iovec, 1);

	vector[0].iov_base = state->buf + state->ofs;
	vector[0].iov_len = wanted;

	state->ofs += wanted;
/*
 * Async state for np_read_send/recv.
 *
 * NOTE(review): truncated — the "ssize_t nread" result field (read by
 * np_read_recv) and the closing "};" are missing from this excerpt.
 */
struct np_read_state {
	struct np_proxy_state *p;
	struct np_ipc_readv_next_vector_state next_vector;

	bool is_data_outstanding;	/* more bytes pending after this read */

static void np_read_done(struct tevent_req *subreq);
/*
 * Start an async read of up to "len" bytes from the named pipe behind
 * "handle" into "data". Internal pipes are read synchronously via
 * read_from_internal_pipe() and complete immediately; proxied pipes
 * queue a tstream_readv_pdu driven by np_ipc_readv_next_vector() and
 * complete in np_read_done(). Collect with np_read_recv().
 *
 * NOTE(review): truncated — the NULL check after tevent_req_create,
 * "return req;" statements, some call-argument lines and closing braces
 * are missing. Code kept byte-for-byte; comments only.
 */
struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				struct fake_file_handle *handle,
				uint8_t *data, size_t len)
	struct tevent_req *req;
	struct np_read_state *state;

	req = tevent_req_create(mem_ctx, &state, struct np_read_state);

	/* Internal pipe: synchronous read, completes inline. */
	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nread = read_from_internal_pipe(
			p, (char *)data, len, &state->is_data_outstanding);

		status = (state->nread >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;

	/* Proxied pipe: queue an async short-read on the proxy stream. */
	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		np_ipc_readv_next_vector_init(&state->next_vector,

		subreq = tstream_readv_pdu_queue_send(state,
						      np_ipc_readv_next_vector,
						      &state->next_vector);
		if (subreq == NULL) {

		tevent_req_set_callback(subreq, np_read_done, req);

	status = NT_STATUS_INVALID_HANDLE;

	/* Synchronous completion path (internal pipe or early error). */
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);

	tevent_req_nterror(req, status);

	return tevent_req_post(req, ev);
1354 static void np_read_done(struct tevent_req *subreq)
1356 struct tevent_req *req = tevent_req_callback_data(
1357 subreq, struct tevent_req);
1358 struct np_read_state *state = tevent_req_data(
1359 req, struct np_read_state);
1363 ret = tstream_readv_pdu_queue_recv(subreq, &err);
1364 TALLOC_FREE(subreq);
1366 tevent_req_nterror(req, map_nt_error_from_unix(err));
1371 state->is_data_outstanding = (state->next_vector.remaining > 0);
1373 tevent_req_done(req);
1377 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1378 bool *is_data_outstanding)
1380 struct np_read_state *state = tevent_req_data(
1381 req, struct np_read_state);
1384 if (tevent_req_is_nterror(req, &status)) {
1387 *nread = state->nread;
1388 *is_data_outstanding = state->is_data_outstanding;
1389 return NT_STATUS_OK;
1393 * @brief Create a new RPC client context which uses a local dispatch function.
1395 * @param[in] conn The connection struct that will hold the pipe
1397 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
1399 * @return NT_STATUS_OK on success, a corresponding NT status if an
1402 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
1403 struct rpc_pipe_client **spoolss_pipe)
1407 /* TODO: check and handle disconnections */
1409 if (!conn->spoolss_pipe) {
1410 status = rpc_pipe_open_internal(conn,
1411 &ndr_table_spoolss.syntax_id,
1412 rpc_spoolss_dispatch,
1414 &conn->spoolss_pipe);
1415 if (!NT_STATUS_IS_OK(status)) {
1420 *spoolss_pipe = conn->spoolss_pipe;
1421 return NT_STATUS_OK;