source3/rpc_server/srv_pipe_hnd.c
/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell              1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison                1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"
#include "../librpc/gen_ndr/srv_spoolss.h"
#include "librpc/gen_ndr/ndr_named_pipe_auth.h"
#include "../libcli/named_pipe_auth/npa_tstream.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

/****************************************************************************
 Initialise an outgoing packet.
****************************************************************************/

static bool pipe_init_outgoing_data(pipes_struct *p)
{
        output_data *o_data = &p->out_data;

        /* Reset the offset counters. */
        o_data->data_sent_length = 0;
        o_data->current_pdu_sent = 0;

        prs_mem_free(&o_data->frag);

        /* Free any memory in the current return data buffer. */
        prs_mem_free(&o_data->rdata);

        /*
         * Initialize the outgoing RPC data buffer.
         * We will use this as the raw data area for replying to rpc requests.
         */
        if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
                DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
                return False;
        }

        return True;
}

/****************************************************************************
 Sets the fault state on incoming packets.
****************************************************************************/

static void set_incoming_fault(pipes_struct *p)
{
        prs_mem_free(&p->in_data.data);
        p->in_data.pdu_needed_len = 0;
        p->in_data.pdu_received_len = 0;
        p->fault_state = True;
        DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
                   get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
}

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN bytes of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
{
        size_t len_needed_to_complete_hdr =
                MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);

        DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
                   "len_needed_to_complete_hdr = %u, "
                   "pdu_received_len = %u\n",
                   (unsigned int)data_to_copy,
                   (unsigned int)len_needed_to_complete_hdr,
                   (unsigned int)p->in_data.pdu_received_len ));

        if (p->in_data.current_in_pdu == NULL) {
                p->in_data.current_in_pdu = talloc_array(p, uint8_t,
                                                         RPC_HEADER_LEN);
        }
        if (p->in_data.current_in_pdu == NULL) {
                DEBUG(0, ("talloc failed\n"));
                return -1;
        }

        memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
                data, len_needed_to_complete_hdr);
        p->in_data.pdu_received_len += len_needed_to_complete_hdr;

        return (ssize_t)len_needed_to_complete_hdr;
}

/****************************************************************************
 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
****************************************************************************/

static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
        /*
         * Unmarshall the header to determine the needed length.
         */

        prs_struct rpc_in;

        if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
                DEBUG(0, ("unmarshall_rpc_header: "
                          "assert on rpc header length failed.\n"));
                set_incoming_fault(p);
                return -1;
        }

        prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
        prs_set_endian_data( &rpc_in, p->endian);

        prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
                                        p->in_data.pdu_received_len, False);

        /*
         * Unmarshall the header as this will tell us how much
         * data we need to read to get the complete pdu.
         * This also sets the endian flag in rpc_in.
         */

        if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
                DEBUG(0, ("unmarshall_rpc_header: "
                          "failed to unmarshall RPC_HDR.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        /*
         * Validate the RPC header.
         */

        if(p->hdr.major != 5 || p->hdr.minor != 0) {
                DEBUG(0, ("unmarshall_rpc_header: "
                          "invalid major/minor numbers in RPC_HDR.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        /*
         * If there's no data in the incoming buffer this should be the
         * start of a new RPC.
         */

        if(prs_offset(&p->in_data.data) == 0) {

                /*
                 * AS/U doesn't seem to set the FIRST flag in a BIND packet.
                 */

                if ((p->hdr.pkt_type == DCERPC_PKT_REQUEST) &&
                    !(p->hdr.flags & DCERPC_PFC_FLAG_FIRST)) {
                        /*
                         * Ensure that the FIRST flag is set.
                         * If not then we have a stream mismatch.
                         */

                        DEBUG(0, ("unmarshall_rpc_header: "
                                  "FIRST flag not set in first PDU !\n"));
                        set_incoming_fault(p);
                        prs_mem_free(&rpc_in);
                        return -1;
                }

                /*
                 * If this is the first PDU then set the endianness
                 * flag in the pipe. We will need this when parsing all
                 * data in this RPC.
                 */

                p->endian = rpc_in.bigendian_data;

                DEBUG(5, ("unmarshall_rpc_header: using %sendian RPC\n",
                          p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

        } else {

                /*
                 * If this is *NOT* the first PDU then check that the endianness
                 * flag in the pipe is the same as that in the PDU.
                 */

                if (p->endian != rpc_in.bigendian_data) {
                        DEBUG(0, ("unmarshall_rpc_header: endianness "
                                  "flag (%d) differs in next PDU !\n",
                                  (int)p->endian));
                        set_incoming_fault(p);
                        prs_mem_free(&rpc_in);
                        return -1;
                }
        }

        /*
         * Ensure that the pdu length is sane.
         */

        if ((p->hdr.frag_len < RPC_HEADER_LEN) ||
            (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
                DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        DEBUG(10, ("unmarshall_rpc_header: type = %u, flags = %u\n",
                   (unsigned int)p->hdr.pkt_type, (unsigned int)p->hdr.flags));

        p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

        prs_mem_free(&rpc_in);

        p->in_data.current_in_pdu = TALLOC_REALLOC_ARRAY(
                p, p->in_data.current_in_pdu, uint8_t, p->hdr.frag_len);
        if (p->in_data.current_in_pdu == NULL) {
                DEBUG(0, ("talloc failed\n"));
                set_incoming_fault(p);
                return -1;
        }

        return 0; /* No extra data processed. */
}
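
#if 0
/* For reference (a sketch, not the prs_struct definitions used above):
 * the 16-byte DCE/RPC connection-oriented common header that
 * smb_io_rpc_hdr() unmarshalls, per the DCE 1.1 RPC specification. */
struct rpc_hdr_sketch {
        uint8_t  rpc_vers;       /* must be 5 */
        uint8_t  rpc_vers_minor; /* must be 0 */
        uint8_t  ptype;          /* DCERPC_PKT_* packet type */
        uint8_t  pfc_flags;      /* DCERPC_PFC_FLAG_FIRST/LAST/... */
        uint8_t  drep[4];        /* data representation; drep[0] & 0x10
                                  * means little-endian integers */
        uint16_t frag_length;    /* whole fragment, header included */
        uint16_t auth_length;    /* length of the auth trailer */
        uint32_t call_id;        /* matches requests to responses */
};
#endif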

/****************************************************************************
 Call this to free any talloc'ed memory. Do this before and after processing
 a complete PDU.
****************************************************************************/

static void free_pipe_context(pipes_struct *p)
{
        if (p->mem_ctx) {
                DEBUG(3, ("free_pipe_context: "
                          "destroying talloc pool of size %lu\n",
                          (unsigned long)talloc_total_size(p->mem_ctx)));
                talloc_free_children(p->mem_ctx);
        } else {
                p->mem_ctx = talloc_named(p, 0, "pipe %s %p",
                                    get_pipe_name_from_syntax(talloc_tos(),
                                                              &p->syntax), p);
                if (p->mem_ctx == NULL) {
                        p->fault_state = True;
                }
        }
}

/****************************************************************************
 Processes a request pdu. This will do auth processing if needed, and
 appends the data into the complete stream if the LAST flag is not set.
****************************************************************************/

static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
        uint32 ss_padding_len = 0;
        size_t data_len = p->hdr.frag_len
                                - RPC_HEADER_LEN
                                - RPC_HDR_REQ_LEN
                                - (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0)
                                - p->hdr.auth_len;

        if(!p->pipe_bound) {
                DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
                set_incoming_fault(p);
                return False;
        }

        /*
         * Read the RPC request header.
         */

        if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
                DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
                set_incoming_fault(p);
                return False;
        }

        /*
         * Check if we need to do authentication processing.
         * This is only done on requests, not binds.
         */

        switch(p->auth.auth_type) {
                case PIPE_AUTH_TYPE_NONE:
                        break;

                case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
                case PIPE_AUTH_TYPE_NTLMSSP:
                {
                        NTSTATUS status;
                        if (!api_pipe_ntlmssp_auth_process(p, rpc_in_p,
                                                           &ss_padding_len,
                                                           &status)) {
                                DEBUG(0, ("process_request_pdu: "
                                          "failed to do auth processing.\n"));
                                DEBUG(0, ("process_request_pdu: error is %s\n",
                                          nt_errstr(status)));
                                set_incoming_fault(p);
                                return False;
                        }
                        break;
                }

                case PIPE_AUTH_TYPE_SCHANNEL:
                        if (!api_pipe_schannel_process(p, rpc_in_p,
                                                        &ss_padding_len)) {
                                DEBUG(3, ("process_request_pdu: "
                                          "failed to do schannel processing.\n"));
                                set_incoming_fault(p);
                                return False;
                        }
                        break;

                default:
                        DEBUG(0, ("process_request_pdu: "
                                  "unknown auth type %u set.\n",
                                  (unsigned int)p->auth.auth_type));
                        set_incoming_fault(p);
                        return False;
        }

        /* Now we've done the sign/seal we can remove any padding data. */
        if (data_len > ss_padding_len) {
                data_len -= ss_padding_len;
        }

        /*
         * Check the data length doesn't go over the 15Mb limit.
         * This was increased after observing a bug in the Windows NT 4.0 SP6a
         * spoolsv.exe where the response to a GETPRINTERDRIVER2 RPC
         * would not fit in the initial buffer of size 0x1068.  --jerry 22/01/2002
         */

        if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
                DEBUG(0, ("process_request_pdu: "
                          "rpc data buffer too large (%u + %u)\n",
                          (unsigned int)prs_data_size(&p->in_data.data),
                          (unsigned int)data_len ));
                set_incoming_fault(p);
                return False;
        }

        /*
         * Append the data portion into the buffer and return.
         */

        if (!prs_append_some_prs_data(&p->in_data.data, rpc_in_p,
                                      prs_offset(rpc_in_p), data_len)) {
                DEBUG(0, ("process_request_pdu: Unable to append data size %u "
                          "to parse buffer of size %u.\n",
                          (unsigned int)data_len,
                          (unsigned int)prs_data_size(&p->in_data.data)));
                set_incoming_fault(p);
                return False;
        }

        if(p->hdr.flags & DCERPC_PFC_FLAG_LAST) {
                bool ret = False;
                /*
                 * Ok - we finally have a complete RPC stream.
                 * Call the rpc command to process it.
                 */

                /*
                 * Ensure the internal prs buffer size is *exactly* the same
                 * size as the current offset.
                 */

                if (!prs_set_buffer_size(&p->in_data.data,
                                         prs_offset(&p->in_data.data))) {
                        DEBUG(0, ("process_request_pdu: "
                                  "Call to prs_set_buffer_size failed!\n"));
                        set_incoming_fault(p);
                        return False;
                }

                /*
                 * Set the parse offset to the start of the data and set the
                 * prs_struct to UNMARSHALL.
                 */

                prs_set_offset(&p->in_data.data, 0);
                prs_switch_type(&p->in_data.data, UNMARSHALL);

                /*
                 * Process the complete data stream here.
                 */

                free_pipe_context(p);

                if(pipe_init_outgoing_data(p)) {
                        ret = api_pipe_request(p);
                }

                free_pipe_context(p);

                /*
                 * We have consumed the whole data stream. Set back to
                 * marshalling and set the offset back to the start of
                 * the buffer to re-use it (we could also do a prs_mem_free()
                 * and then re-init on the next start of PDU. Not sure which
                 * is best here... JRA).
                 */

                prs_switch_type(&p->in_data.data, MARSHALL);
                prs_set_offset(&p->in_data.data, 0);
                return ret;
        }

        return True;
}
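
#if 0
/* Worked example (values are illustrative, not from a trace): for a
 * signed request PDU with frag_len = 1072 and auth_len = 16, the stub
 * data length computed above is
 *   1072 - RPC_HEADER_LEN (16) - RPC_HDR_REQ_LEN (8)
 *        - RPC_HDR_AUTH_LEN (8) - auth_len (16)      = 1024,
 * and after the sign/seal step subtracts ss_padding_len (say 4),
 * 1020 bytes of stub data are appended to p->in_data.data. */
#endif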

/****************************************************************************
 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 already been parsed and stored in p->hdr.
****************************************************************************/

static void process_complete_pdu(pipes_struct *p)
{
        prs_struct rpc_in;
        size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
        char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
        bool reply = False;

        if(p->fault_state) {
                DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
                set_incoming_fault(p);
                setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
                return;
        }

        prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

        /*
         * Ensure we're using the correct endianness for both the
         * RPC header flags and the raw data we will be reading from.
         */

        prs_set_endian_data( &rpc_in, p->endian);
        prs_set_endian_data( &p->in_data.data, p->endian);

        prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

        DEBUG(10,("process_complete_pdu: processing packet type %u\n",
                        (unsigned int)p->hdr.pkt_type ));

        switch (p->hdr.pkt_type) {
                case DCERPC_PKT_REQUEST:
                        reply = process_request_pdu(p, &rpc_in);
                        break;

                case DCERPC_PKT_PING: /* CL request - ignore... */
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "Connectionless packet type %u received on "
                                  "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_RESPONSE: /* No responses here. */
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "DCERPC_PKT_RESPONSE received from client "
                                  "on pipe %s.\n",
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_FAULT:
                case DCERPC_PKT_WORKING:
                        /* CL request - reply to a ping when a call is in process. */
                case DCERPC_PKT_NOCALL:
                        /* CL - server reply to a ping call. */
                case DCERPC_PKT_REJECT:
                case DCERPC_PKT_ACK:
                case DCERPC_PKT_CL_CANCEL:
                case DCERPC_PKT_FACK:
                case DCERPC_PKT_CANCEL_ACK:
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "Connectionless packet type %u received on "
                                  "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_BIND:
                        /*
                         * We assume that a pipe bind is only in one pdu.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_bind_req(p, &rpc_in);
                        }
                        break;

                case DCERPC_PKT_BIND_ACK:
                case DCERPC_PKT_BIND_NAK:
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "DCERPC_PKT_BIND_ACK/DCERPC_PKT_BIND_NAK "
                                  "packet type %u received on pipe %s.\n",
                                  (unsigned int)p->hdr.pkt_type,
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_ALTER:
                        /*
                         * We assume that an alter context is only in one pdu.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_alter_context(p, &rpc_in);
                        }
                        break;

                case DCERPC_PKT_ALTER_RESP:
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "DCERPC_PKT_ALTER_RESP on pipe %s: "
                                  "Should only be server -> client.\n",
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_AUTH3:
                        /*
                         * The third packet in an NTLMSSP auth exchange.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_bind_auth3(p, &rpc_in);
                        }
                        break;

                case DCERPC_PKT_SHUTDOWN:
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "DCERPC_PKT_SHUTDOWN on pipe %s: "
                                  "Should only be server -> client.\n",
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_CO_CANCEL:
                        DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
                                 " Abandoning rpc call.\n"));
                        /* As we never do asynchronous RPC serving, we can
                         * never cancel a call (as far as I know).
                         * If we ever did we'd have to send a cancel_ack reply.
                         * For now, just free all client data and continue
                         * processing. */
                        reply = True;
                        break;
#if 0
                        /* Enable this if we're doing async rpc. */
                        /* We must check the outstanding callid matches. */
                        if(pipe_init_outgoing_data(p)) {
                                /* Send a cancel_ack PDU reply. */
                                /* We should probably check the auth-verifier here. */
                                reply = setup_cancel_ack_reply(p, &rpc_in);
                        }
                        break;
#endif

                case DCERPC_PKT_ORPHANED:
                        /* We should probably check the auth-verifier here.
                         * For now just free all client data and continue
                         * processing. */
                        DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
                                  " Abandoning rpc call.\n"));
                        reply = True;
                        break;

                default:
                        DEBUG(0, ("process_complete_pdu: "
                                  "Unknown rpc type = %u received.\n",
                                  (unsigned int)p->hdr.pkt_type));
                        break;
        }

        /* Reset to little endian.
         * Probably don't need this but it won't hurt. */
        prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

        if (!reply) {
                DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
                         "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
                                                                &p->syntax)));
                set_incoming_fault(p);
                setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
        } else {
                /*
                 * Reset the lengths. We're ready for a new pdu.
                 */
                TALLOC_FREE(p->in_data.current_in_pdu);
                p->in_data.pdu_needed_len = 0;
                p->in_data.pdu_received_len = 0;
        }

        prs_mem_free(&rpc_in);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
        size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
                                        - p->in_data.pdu_received_len);

        DEBUG(10, ("process_incoming_data: Start: pdu_received_len = %u, "
                   "pdu_needed_len = %u, incoming data = %u\n",
                   (unsigned int)p->in_data.pdu_received_len,
                   (unsigned int)p->in_data.pdu_needed_len,
                   (unsigned int)n ));

        if(data_to_copy == 0) {
                /*
                 * This is an error - data is being received and there is no
                 * space in the PDU. Free the received data and go into the
                 * fault state.
                 */
                DEBUG(0, ("process_incoming_data: "
                          "No space in incoming pdu buffer. "
                          "Current size = %u incoming data size = %u\n",
                          (unsigned int)p->in_data.pdu_received_len,
                          (unsigned int)n));
                set_incoming_fault(p);
                return -1;
        }

        /*
         * If we have no data already, wait until we get at least
         * RPC_HEADER_LEN bytes before we can do anything.
         */

        if ((p->in_data.pdu_needed_len == 0) &&
            (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
                /*
                 * Always return here. If we have more data then the RPC_HEADER
                 * will be processed the next time around the loop.
                 */
                return fill_rpc_header(p, data, data_to_copy);
        }

        /*
         * At this point we know we have at least an RPC_HEADER_LEN amount of
         * data stored in current_in_pdu.
         */

        /*
         * If pdu_needed_len is zero this is a new pdu.
         * Unmarshall the header so we know how much more
         * data we need, then loop again.
         */

        if(p->in_data.pdu_needed_len == 0) {
                ssize_t rret = unmarshall_rpc_header(p);
                if (rret == -1 || p->in_data.pdu_needed_len > 0) {
                        return rret;
                }
                /* If rret == 0 and pdu_needed_len == 0 here we have a PDU
                 * that consists of an RPC_HEADER only. This is a
                 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
                 * DCERPC_PKT_ORPHANED pdu type.
                 * Deal with this in process_complete_pdu(). */
        }

        /*
         * Ok - at this point we have a valid RPC_HEADER in p->hdr.
         * Keep reading until we have a full pdu.
         */

        data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

        /*
         * Copy as much of the data as we need into the current_in_pdu buffer.
         * pdu_needed_len becomes zero when we have a complete pdu.
         */

        memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
                data, data_to_copy);
        p->in_data.pdu_received_len += data_to_copy;
        p->in_data.pdu_needed_len -= data_to_copy;

        /*
         * Do we have a complete PDU ?
         * (return the number of bytes handled in the call)
         */

        if(p->in_data.pdu_needed_len == 0) {
                process_complete_pdu(p);
                return data_to_copy;
        }

        DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
                   "pdu_received_len = %u, pdu_needed_len = %u\n",
                   (unsigned int)p->in_data.pdu_received_len,
                   (unsigned int)p->in_data.pdu_needed_len));

        return (ssize_t)data_to_copy;
}
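
#if 0
/* Illustrative trace (assuming a 1024-byte PDU, i.e. frag_len = 1024):
 * a single 1040-byte client write containing one complete PDU plus the
 * first 16 bytes of the next one is consumed in four calls:
 *   1st call: fills the 16-byte header,          returns 16
 *   2nd call: parses the header, consumes none,  returns 0
 *   3rd call: copies the 1008-byte body,         returns 1008
 *             (PDU complete, process_complete_pdu() runs)
 *   4th call: fills the next 16-byte header,     returns 16
 * write_to_internal_pipe() below loops until all bytes are used. */
#endif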

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
        size_t data_left = n;

        while(data_left) {
                ssize_t data_used;

                DEBUG(10, ("write_to_internal_pipe: data_left = %u\n",
                          (unsigned int)data_left));

                data_used = process_incoming_data(p, data, data_left);

                DEBUG(10, ("write_to_internal_pipe: data_used = %d\n",
                           (int)data_used));

                if(data_used < 0) {
                        return -1;
                }

                data_left -= data_used;
                data += data_used;
        }

        return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_next_pdu() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
                                       size_t n, bool *is_data_outstanding)
{
        uint32 pdu_remaining = 0;
        ssize_t data_returned = 0;

        if (!p) {
                DEBUG(0,("read_from_internal_pipe: pipe not open\n"));
                return -1;
        }

        DEBUG(6, ("read_from_internal_pipe: name: %s len: %u\n",
                 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                 (unsigned int)n));

        /*
         * We cannot return more than one PDU length per
         * read request.
         */

        /*
         * This condition should result in the connection being closed.
         * Netapp filers seem to set it to 0xffff which results in domain
         * authentications failing.  Just ignore it so things work.
         */

        if(n > RPC_MAX_PDU_FRAG_LEN) {
                DEBUG(5,("read_from_internal_pipe: too large read (%u) "
                         "requested on pipe %s. We can only service %d sized "
                         "reads.\n",
                         (unsigned int)n,
                         get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                         RPC_MAX_PDU_FRAG_LEN ));
                n = RPC_MAX_PDU_FRAG_LEN;
        }

        /*
         * Determine if there is still data to send in the
         * pipe PDU buffer. Always send this first. Never
         * send more than is left in the current PDU. The
         * client should send a new read request for a new
         * PDU.
         */

        pdu_remaining = prs_offset(&p->out_data.frag)
                - p->out_data.current_pdu_sent;

        if (pdu_remaining > 0) {
                data_returned = (ssize_t)MIN(n, pdu_remaining);

                DEBUG(10,("read_from_internal_pipe: %s: current_pdu_len = %u, "
                          "current_pdu_sent = %u returning %d bytes.\n",
                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                          (unsigned int)prs_offset(&p->out_data.frag),
                          (unsigned int)p->out_data.current_pdu_sent,
                          (int)data_returned));

                memcpy(data,
                       prs_data_p(&p->out_data.frag)
                       + p->out_data.current_pdu_sent,
                       data_returned);

                p->out_data.current_pdu_sent += (uint32)data_returned;
                goto out;
        }

        /*
         * At this point the current PDU has been fully sent (which
         * may of course be trivially true if this is the first
         * return fragment).
         */

        DEBUG(10,("read_from_internal_pipe: %s: fault_state = %d : "
                  "data_sent_length = %u, "
                  "prs_offset(&p->out_data.rdata) = %u.\n",
                  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                  (int)p->fault_state,
                  (unsigned int)p->out_data.data_sent_length,
                  (unsigned int)prs_offset(&p->out_data.rdata) ));

        if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
                /*
                 * We have sent all possible data, return 0.
                 */
                data_returned = 0;
                goto out;
        }

        /*
         * We need to create a new PDU from the data left in
         * p->out_data.rdata. Create the header/data/footers. This also
         * sets up the fields p->out_data.current_pdu_sent and
         * p->out_data.data_sent_length and stores the outgoing PDU in
         * p->out_data.frag.
         */

        if(!create_next_pdu(p)) {
                DEBUG(0,("read_from_internal_pipe: %s: create_next_pdu "
                         "failed.\n",
                         get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
                return -1;
        }

        data_returned = MIN(n, prs_offset(&p->out_data.frag));

        memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
        p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
        (*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;

        if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
                /* We've returned everything in the out_data.frag
                 * so we're done with this pdu. Free it and reset
                 * current_pdu_sent. */
                p->out_data.current_pdu_sent = 0;
                prs_mem_free(&p->out_data.frag);
        }
        return data_returned;
}
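
/*
 * Note (a summary of the above, not new behaviour): a response larger
 * than one fragment is returned PDU by PDU. Each create_next_pdu()
 * call marshalls one fragment into out_data.frag, the client drains it
 * with one or more reads, *is_data_outstanding signals whether another
 * read is worthwhile, and the fragment is freed once fully returned.
 */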

bool fsp_is_np(struct files_struct *fsp)
{
        enum FAKE_FILE_TYPE type;

        if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
                return false;
        }

        type = fsp->fake_file_handle->type;

        return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
                || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}

struct np_proxy_state {
        uint16_t file_type;
        uint16_t device_state;
        uint64_t allocation_size;
        struct tstream_context *npipe;
        struct tevent_queue *read_queue;
        struct tevent_queue *write_queue;
};

static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
                                const char *pipe_name,
                                const struct tsocket_address *local_address,
                                const struct tsocket_address *remote_address,
                                struct auth_serversupplied_info *server_info)
{
        struct np_proxy_state *result;
        char *socket_np_dir;
        const char *socket_dir;
        struct tevent_context *ev;
        struct tevent_req *subreq;
        struct netr_SamInfo3 *info3;
        NTSTATUS status;
        bool ok;
        int ret;
        int sys_errno;

        result = talloc(mem_ctx, struct np_proxy_state);
        if (result == NULL) {
                DEBUG(0, ("talloc failed\n"));
                return NULL;
        }

        result->read_queue = tevent_queue_create(result, "np_read");
        if (result->read_queue == NULL) {
                DEBUG(0, ("tevent_queue_create failed\n"));
                goto fail;
        }

        result->write_queue = tevent_queue_create(result, "np_write");
        if (result->write_queue == NULL) {
                DEBUG(0, ("tevent_queue_create failed\n"));
                goto fail;
        }

        ev = s3_tevent_context_init(talloc_tos());
        if (ev == NULL) {
                DEBUG(0, ("s3_tevent_context_init failed\n"));
                goto fail;
        }

        socket_dir = lp_parm_const_string(
                GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
                get_dyn_NCALRPCDIR());
        if (socket_dir == NULL) {
                DEBUG(0, ("external_rpc_pipe:socket_dir not set\n"));
                goto fail;
        }
        socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
        if (socket_np_dir == NULL) {
                DEBUG(0, ("talloc_asprintf failed\n"));
                goto fail;
        }

        info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
        if (info3 == NULL) {
                DEBUG(0, ("talloc failed\n"));
                goto fail;
        }

        status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
        if (!NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(info3);
                DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
                          nt_errstr(status)));
                goto fail;
        }

        become_root();
        subreq = tstream_npa_connect_send(talloc_tos(), ev,
                                          socket_np_dir,
                                          pipe_name,
                                          remote_address, /* client_addr */
                                          NULL, /* client_name */
                                          local_address, /* server_addr */
                                          NULL, /* server_name */
                                          info3,
                                          server_info->user_session_key,
                                          data_blob_null /* delegated_creds */);
        if (subreq == NULL) {
                unbecome_root();
                DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
                          "user %s\\%s failed\n",
                          socket_np_dir, pipe_name, info3->base.domain.string,
                          info3->base.account_name.string));
                goto fail;
        }
        ok = tevent_req_poll(subreq, ev);
        unbecome_root();
        if (!ok) {
                DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
                          "failed for tstream_npa_connect: %s\n",
                          socket_np_dir, pipe_name, info3->base.domain.string,
                          info3->base.account_name.string,
                          strerror(errno)));
                goto fail;
        }
        ret = tstream_npa_connect_recv(subreq, &sys_errno,
                                       result,
                                       &result->npipe,
                                       &result->file_type,
                                       &result->device_state,
                                       &result->allocation_size);
        TALLOC_FREE(subreq);
        if (ret != 0) {
                DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
                          "user %s\\%s failed: %s\n",
                          socket_np_dir, pipe_name, info3->base.domain.string,
                          info3->base.account_name.string,
                          strerror(sys_errno)));
                goto fail;
        }

        return result;

 fail:
        TALLOC_FREE(result);
        return NULL;
}

NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
                 const struct tsocket_address *local_address,
                 const struct tsocket_address *remote_address,
                 struct auth_serversupplied_info *server_info,
                 struct fake_file_handle **phandle)
{
        const char **proxy_list;
        struct fake_file_handle *handle;

        proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

        handle = talloc(mem_ctx, struct fake_file_handle);
        if (handle == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
                struct np_proxy_state *p;

                p = make_external_rpc_pipe_p(handle, name,
                                             local_address,
                                             remote_address,
                                             server_info);

                handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
                handle->private_data = p;
        } else {
                struct pipes_struct *p;
                struct ndr_syntax_id syntax;
                const char *client_address;

                if (!is_known_pipename(name, &syntax)) {
                        TALLOC_FREE(handle);
                        return NT_STATUS_OBJECT_NAME_NOT_FOUND;
                }

                if (tsocket_address_is_inet(remote_address, "ip")) {
                        client_address = tsocket_address_inet_addr_string(
                                                remote_address,
                                                talloc_tos());
                        if (client_address == NULL) {
                                TALLOC_FREE(handle);
                                return NT_STATUS_NO_MEMORY;
                        }
                } else {
                        client_address = "";
                }

                p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
                                             server_info);

                handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
                handle->private_data = p;
        }

        if (handle->private_data == NULL) {
                TALLOC_FREE(handle);
                return NT_STATUS_PIPE_NOT_AVAILABLE;
        }

        *phandle = handle;

        return NT_STATUS_OK;
}

bool np_read_in_progress(struct fake_file_handle *handle)
{
        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                return false;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                size_t read_count;

                read_count = tevent_queue_length(p->read_queue);
                if (read_count > 0) {
                        return true;
                }

                return false;
        }

        return false;
}

struct np_write_state {
        struct event_context *ev;
        struct np_proxy_state *p;
        struct iovec iov;
        ssize_t nwritten;
};

static void np_write_done(struct tevent_req *subreq);

struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
                                 struct fake_file_handle *handle,
                                 const uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_write_state *state;
        NTSTATUS status;

        DEBUG(6, ("np_write_send: len: %d\n", (int)len));
        dump_data(50, data, len);

        req = tevent_req_create(mem_ctx, &state, struct np_write_state);
        if (req == NULL) {
                return NULL;
        }

        if (len == 0) {
                state->nwritten = 0;
                status = NT_STATUS_OK;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                struct pipes_struct *p = talloc_get_type_abort(
                        handle->private_data, struct pipes_struct);

                state->nwritten = write_to_internal_pipe(p, (char *)data, len);

                status = (state->nwritten >= 0)
                        ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                struct tevent_req *subreq;

                state->ev = ev;
                state->p = p;
                state->iov.iov_base = CONST_DISCARD(void *, data);
                state->iov.iov_len = len;

                subreq = tstream_writev_queue_send(state, ev,
                                                   p->npipe,
                                                   p->write_queue,
                                                   &state->iov, 1);
                if (subreq == NULL) {
                        goto fail;
                }
                tevent_req_set_callback(subreq, np_write_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
 fail:
        TALLOC_FREE(req);
        return NULL;
}

static void np_write_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        ssize_t received;
        int err;

        received = tstream_writev_queue_recv(subreq, &err);
        TALLOC_FREE(subreq);
        if (received < 0) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }
        state->nwritten = received;
        tevent_req_done(req);
}

NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
{
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *pnwritten = state->nwritten;
        return NT_STATUS_OK;
}
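
#if 0
/* Illustrative only: the synchronous caller pattern the
 * np_write_send/np_write_recv pair is designed for. The function name
 * is hypothetical; 'ev', 'handle', 'buf' and 'len' are assumed to
 * exist in the caller. */
static NTSTATUS np_write_sketch(struct event_context *ev,
                                struct fake_file_handle *handle,
                                const uint8_t *buf, size_t len)
{
        struct tevent_req *req;
        ssize_t nwritten;
        NTSTATUS status;

        req = np_write_send(talloc_tos(), ev, handle, buf, len);
        if (req == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
        if (!tevent_req_poll(req, ev)) {
                TALLOC_FREE(req);
                return map_nt_error_from_unix(errno);
        }
        status = np_write_recv(req, &nwritten);
        TALLOC_FREE(req);
        return status;
}
#endif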

struct np_ipc_readv_next_vector_state {
        uint8_t *buf;
        size_t len;
        off_t ofs;
        size_t remaining;
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
                                          uint8_t *buf, size_t len)
{
        ZERO_STRUCTP(s);

        s->buf = buf;
        s->len = MIN(len, UINT16_MAX);
}

static int np_ipc_readv_next_vector(struct tstream_context *stream,
                                    void *private_data,
                                    TALLOC_CTX *mem_ctx,
                                    struct iovec **_vector,
                                    size_t *count)
{
        struct np_ipc_readv_next_vector_state *state =
                (struct np_ipc_readv_next_vector_state *)private_data;
        struct iovec *vector;
        ssize_t pending;
        size_t wanted;

        if (state->ofs == state->len) {
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        pending = tstream_pending_bytes(stream);
        if (pending == -1) {
                return -1;
        }

        if (pending == 0 && state->ofs != 0) {
                /* return a short read */
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        if (pending == 0) {
                /* we want at least one byte and recheck again */
                wanted = 1;
        } else {
                size_t missing = state->len - state->ofs;
                if (pending > missing) {
                        /* there's more available */
                        state->remaining = pending - missing;
                        wanted = missing;
                } else {
                        /* read what we can get and recheck in the next cycle */
                        wanted = pending;
                }
        }

        vector = talloc_array(mem_ctx, struct iovec, 1);
        if (!vector) {
                return -1;
        }

        vector[0].iov_base = state->buf + state->ofs;
        vector[0].iov_len = wanted;

        state->ofs += wanted;

        *_vector = vector;
        *count = 1;
        return 0;
}
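
#if 0
/* Worked example (illustrative): np_read_send() asks for len = 1024
 * bytes while 1500 bytes are pending on the stream. The first vector
 * supplies the missing 1024 bytes and records remaining = 476; the
 * next call sees ofs == len and ends the readv, so np_read_done()
 * below reports is_data_outstanding = true. */
#endif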

struct np_read_state {
        struct np_proxy_state *p;
        struct np_ipc_readv_next_vector_state next_vector;

        ssize_t nread;
        bool is_data_outstanding;
};

static void np_read_done(struct tevent_req *subreq);

struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
                                struct fake_file_handle *handle,
                                uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_read_state *state;
        NTSTATUS status;

        req = tevent_req_create(mem_ctx, &state, struct np_read_state);
        if (req == NULL) {
                return NULL;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                struct pipes_struct *p = talloc_get_type_abort(
                        handle->private_data, struct pipes_struct);

                state->nread = read_from_internal_pipe(
                        p, (char *)data, len, &state->is_data_outstanding);

                status = (state->nread >= 0)
                        ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                struct tevent_req *subreq;

                np_ipc_readv_next_vector_init(&state->next_vector,
                                              data, len);

                subreq = tstream_readv_pdu_queue_send(state,
                                                      ev,
                                                      p->npipe,
                                                      p->read_queue,
                                                      np_ipc_readv_next_vector,
                                                      &state->next_vector);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                tevent_req_set_callback(subreq, np_read_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
}

static void np_read_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        ssize_t ret;
        int err;

        ret = tstream_readv_pdu_queue_recv(subreq, &err);
        TALLOC_FREE(subreq);
        if (ret == -1) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }

        state->nread = ret;
        state->is_data_outstanding = (state->next_vector.remaining > 0);

        tevent_req_done(req);
        return;
}

NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
                      bool *is_data_outstanding)
{
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *nread = state->nread;
        *is_data_outstanding = state->is_data_outstanding;
        return NT_STATUS_OK;
}
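
#if 0
/* Illustrative only: the matching synchronous caller pattern for
 * np_read_send/np_read_recv. The function name is hypothetical; 'ev',
 * 'handle', 'buf' and 'len' are assumed to exist in the caller. */
static NTSTATUS np_read_sketch(struct event_context *ev,
                               struct fake_file_handle *handle,
                               uint8_t *buf, size_t len)
{
        struct tevent_req *req;
        ssize_t nread;
        bool is_data_outstanding;
        NTSTATUS status;

        req = np_read_send(talloc_tos(), ev, handle, buf, len);
        if (req == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
        if (!tevent_req_poll(req, ev)) {
                TALLOC_FREE(req);
                return map_nt_error_from_unix(errno);
        }
        status = np_read_recv(req, &nread, &is_data_outstanding);
        TALLOC_FREE(req);
        return status;
}
#endif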

/**
 * @brief Create a new RPC client context which uses a local dispatch function.
 *
 * @param[in]  conn  The connection struct that will hold the pipe
 *
 * @param[out] spoolss_pipe  A pointer to the connected rpc client pipe.
 *
 * @return              NT_STATUS_OK on success, a corresponding NT status if an
 *                      error occurred.
 */
NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
                                  struct rpc_pipe_client **spoolss_pipe)
{
        NTSTATUS status;

        /* TODO: check and handle disconnections */

        if (!conn->spoolss_pipe) {
                status = rpc_pipe_open_internal(conn,
                                                &ndr_table_spoolss.syntax_id,
                                                rpc_spoolss_dispatch,
                                                conn->server_info,
                                                &conn->spoolss_pipe);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
        }

        *spoolss_pipe = conn->spoolss_pipe;
        return NT_STATUS_OK;
}