s3-rpc: when using rpc_pipe_open_internal, make sure to go through NDR.
[kamenim/samba.git] / source3 / rpc_server / srv_pipe_hnd.c
/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell              1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison                1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"
#include "../librpc/gen_ndr/srv_spoolss.h"
#include "librpc/gen_ndr/ndr_named_pipe_auth.h"
#include "../libcli/named_pipe_auth/npa_tstream.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

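/*
 * Overview: this file implements the smbd ends of named pipes.  Writes
 * from the client are fed into process_incoming_data(), which assembles
 * them into PDU-sized units in in_data.current_in_pdu; once a fragment
 * is complete, process_complete_pdu() dispatches it by packet type.
 * Replies are marshalled into out_data.rdata and handed back to the
 * client one fragment (out_data.frag) at a time by
 * read_from_internal_pipe().
 */
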
/****************************************************************************
 Initialise an outgoing packet.
****************************************************************************/

static bool pipe_init_outgoing_data(pipes_struct *p)
{
        output_data *o_data = &p->out_data;

        /* Reset the offset counters. */
        o_data->data_sent_length = 0;
        o_data->current_pdu_sent = 0;

        prs_mem_free(&o_data->frag);

        /* Free any memory in the current return data buffer. */
        prs_mem_free(&o_data->rdata);

        /*
         * Initialize the outgoing RPC data buffer.
         * We will use this as the raw data area for replying to rpc requests.
         */
        if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
                DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
                return False;
        }

        return True;
}

/****************************************************************************
 Sets the fault state on incoming packets.
****************************************************************************/

static void set_incoming_fault(pipes_struct *p)
{
        prs_mem_free(&p->in_data.data);
        p->in_data.pdu_needed_len = 0;
        p->in_data.pdu_received_len = 0;
        p->fault_state = True;
        DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
                   get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
}

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
{
        size_t len_needed_to_complete_hdr =
                MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);

        DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
                   "len_needed_to_complete_hdr = %u, "
                   "receive_len = %u\n",
                   (unsigned int)data_to_copy,
                   (unsigned int)len_needed_to_complete_hdr,
                   (unsigned int)p->in_data.pdu_received_len ));

        if (p->in_data.current_in_pdu == NULL) {
                p->in_data.current_in_pdu = talloc_array(p, uint8_t,
                                                         RPC_HEADER_LEN);
        }
        if (p->in_data.current_in_pdu == NULL) {
                DEBUG(0, ("talloc failed\n"));
                return -1;
        }

        memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
                data, len_needed_to_complete_hdr);
        p->in_data.pdu_received_len += len_needed_to_complete_hdr;

        return (ssize_t)len_needed_to_complete_hdr;
}

/****************************************************************************
 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
****************************************************************************/
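/*
 * For reference: the 16-byte DCE/RPC connection-oriented common header
 * (C706 section 12.6) that smb_io_rpc_hdr() parses looks like this:
 *
 *   rpc_vers (1) | rpc_vers_minor (1) | ptype (1) | pfc_flags (1) |
 *   drep (4)     | frag_length (2)    | auth_length (2) | call_id (4)
 *
 * frag_length covers the whole fragment including this header, and
 * drep carries the sender's integer representation, which is where the
 * endian flag below comes from.
 */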

static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
        /*
         * Unmarshall the header to determine the needed length.
         */

        prs_struct rpc_in;

        if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
                DEBUG(0, ("unmarshall_rpc_header: "
                          "assert on rpc header length failed.\n"));
                set_incoming_fault(p);
                return -1;
        }

        prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
        prs_set_endian_data( &rpc_in, p->endian);

        prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
                                        p->in_data.pdu_received_len, False);

        /*
         * Unmarshall the header as this will tell us how much
         * data we need to read to get the complete pdu.
         * This also sets the endian flag in rpc_in.
         */

        if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
                DEBUG(0, ("unmarshall_rpc_header: "
                          "failed to unmarshall RPC_HDR.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        /*
         * Validate the RPC header.
         */

        if(p->hdr.major != 5 || p->hdr.minor != 0) {
                DEBUG(0, ("unmarshall_rpc_header: "
                          "invalid major/minor numbers in RPC_HDR.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        /*
         * If there's no data in the incoming buffer this should be the
         * start of a new RPC.
         */

        if(prs_offset(&p->in_data.data) == 0) {

                /*
                 * AS/U doesn't seem to set the FIRST flag in a BIND packet.
                 */

                if ((p->hdr.pkt_type == DCERPC_PKT_REQUEST) &&
                    !(p->hdr.flags & DCERPC_PFC_FLAG_FIRST)) {
                        /*
                         * Ensure that the FIRST flag is set.
                         * If not then we have a stream mismatch.
                         */

                        DEBUG(0, ("unmarshall_rpc_header: "
                                  "FIRST flag not set in first PDU !\n"));
                        set_incoming_fault(p);
                        prs_mem_free(&rpc_in);
                        return -1;
                }

                /*
                 * If this is the first PDU then set the endianness
                 * flag in the pipe. We will need this when parsing all
                 * data in this RPC.
                 */

                p->endian = rpc_in.bigendian_data;

                DEBUG(5, ("unmarshall_rpc_header: using %sendian RPC\n",
                          p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

        } else {

                /*
                 * If this is *NOT* the first PDU then check the endianness
                 * flag in the pipe is the same as that in the PDU.
                 */

                if (p->endian != rpc_in.bigendian_data) {
                        DEBUG(0, ("unmarshall_rpc_header: FIRST endianness "
                                  "flag (%d) differs in next PDU !\n",
                                  (int)p->endian));
                        set_incoming_fault(p);
                        prs_mem_free(&rpc_in);
                        return -1;
                }
        }

        /*
         * Ensure that the pdu length is sane.
         */

        if ((p->hdr.frag_len < RPC_HEADER_LEN) ||
            (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
                DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        DEBUG(10, ("unmarshall_rpc_header: type = %u, flags = %u\n",
                   (unsigned int)p->hdr.pkt_type, (unsigned int)p->hdr.flags));

        p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

        prs_mem_free(&rpc_in);

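        /*
         * Grow the PDU buffer to the full fragment length so the rest of
         * the fragment (frag_len - RPC_HEADER_LEN bytes) can be copied
         * straight in by process_incoming_data().
         */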
        p->in_data.current_in_pdu = TALLOC_REALLOC_ARRAY(
                p, p->in_data.current_in_pdu, uint8_t, p->hdr.frag_len);
        if (p->in_data.current_in_pdu == NULL) {
                DEBUG(0, ("talloc failed\n"));
                set_incoming_fault(p);
                return -1;
        }

        return 0; /* No extra data processed. */
}

/****************************************************************************
  Call this to free any talloc'ed memory. Do this after processing
  a complete incoming and outgoing request (multiple incoming/outgoing
  PDUs).
****************************************************************************/

static void free_pipe_context(pipes_struct *p)
{
        prs_mem_free(&p->out_data.frag);
        prs_mem_free(&p->out_data.rdata);
        prs_mem_free(&p->in_data.data);

        DEBUG(3, ("free_pipe_context: "
                "destroying talloc pool of size %lu\n",
                (unsigned long)talloc_total_size(p->mem_ctx)));
        talloc_free_children(p->mem_ctx);
        /*
         * Re-initialize to set back to marshalling and set the
         * offset back to the start of the buffer.
         */
        if(!prs_init(&p->in_data.data, 128, p->mem_ctx, MARSHALL)) {
                DEBUG(0, ("free_pipe_context: "
                          "prs_init failed!\n"));
                p->fault_state = True;
        }
}

/****************************************************************************
 Processes a request pdu. This will do auth processing if needed, and
 appends the data into the complete stream if the LAST flag is not set.
****************************************************************************/

static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
        uint32 ss_padding_len = 0;
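        /*
         * A request fragment is laid out as
         *
         *   RPC_HEADER | RPC_HDR_REQ | stub data | [pad] [RPC_HDR_AUTH | auth blob]
         *
         * so the stub data length is whatever remains once the fixed
         * headers and any auth trailer are subtracted.  The sign/seal
         * padding is only known after the auth processing below.
         */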
        size_t data_len = p->hdr.frag_len
                                - RPC_HEADER_LEN
                                - RPC_HDR_REQ_LEN
                                - (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0)
                                - p->hdr.auth_len;

        if(!p->pipe_bound) {
                DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
                set_incoming_fault(p);
                return False;
        }

        /*
         * Check if we need to do authentication processing.
         * This is only done on requests, not binds.
         */

        /*
         * Read the RPC request header.
         */

        if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
                DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
                set_incoming_fault(p);
                return False;
        }

        switch(p->auth.auth_type) {
                case PIPE_AUTH_TYPE_NONE:
                        break;

                case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
                case PIPE_AUTH_TYPE_NTLMSSP:
                {
                        NTSTATUS status;
                        if (!api_pipe_ntlmssp_auth_process(p, rpc_in_p,
                                                           &ss_padding_len,
                                                           &status)) {
                                DEBUG(0, ("process_request_pdu: "
                                          "failed to do auth processing.\n"));
                                DEBUG(0, ("process_request_pdu: error is %s\n",
                                          nt_errstr(status)));
                                set_incoming_fault(p);
                                return False;
                        }
                        break;
                }

                case PIPE_AUTH_TYPE_SCHANNEL:
                        if (!api_pipe_schannel_process(p, rpc_in_p,
                                                        &ss_padding_len)) {
                                DEBUG(3, ("process_request_pdu: "
                                          "failed to do schannel processing.\n"));
                                set_incoming_fault(p);
                                return False;
                        }
                        break;

                default:
                        DEBUG(0, ("process_request_pdu: "
                                  "unknown auth type %u set.\n",
                                  (unsigned int)p->auth.auth_type));
                        set_incoming_fault(p);
                        return False;
        }

        /* Now we've done the sign/seal we can remove any padding data. */
        if (data_len > ss_padding_len) {
                data_len -= ss_padding_len;
        }

        /*
         * Check the data length doesn't go over the 15Mb limit.
         * The limit was increased after observing a bug in the Windows
         * NT 4.0 SP6a spoolsv.exe where the response to a
         * GETPRINTERDRIVER2 RPC would not fit in the initial buffer of
         * size 0x1068.   --jerry 22/01/2002
         */

        if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
                DEBUG(0, ("process_request_pdu: "
                          "rpc data buffer too large (%u) + (%u)\n",
                          (unsigned int)prs_data_size(&p->in_data.data),
                          (unsigned int)data_len ));
                set_incoming_fault(p);
                return False;
        }

        /*
         * Append the data portion into the buffer and return.
         */

        if (!prs_append_some_prs_data(&p->in_data.data, rpc_in_p,
                                      prs_offset(rpc_in_p), data_len)) {
                DEBUG(0, ("process_request_pdu: Unable to append data size %u "
                          "to parse buffer of size %u.\n",
                          (unsigned int)data_len,
                          (unsigned int)prs_data_size(&p->in_data.data)));
                set_incoming_fault(p);
                return False;
        }

        if(p->hdr.flags & DCERPC_PFC_FLAG_LAST) {
                bool ret = False;
                /*
                 * Ok - we finally have a complete RPC stream.
                 * Call the rpc command to process it.
                 */

                /*
                 * Ensure the internal prs buffer size is *exactly* the same
                 * size as the current offset.
                 */

                if (!prs_set_buffer_size(&p->in_data.data,
                                         prs_offset(&p->in_data.data))) {
                        DEBUG(0, ("process_request_pdu: "
                                  "Call to prs_set_buffer_size failed!\n"));
                        set_incoming_fault(p);
                        return False;
                }

                /*
                 * Set the parse offset to the start of the data and set the
                 * prs_struct to UNMARSHALL.
                 */

                prs_set_offset(&p->in_data.data, 0);
                prs_switch_type(&p->in_data.data, UNMARSHALL);

                /*
                 * Process the complete data stream here.
                 */

                if(pipe_init_outgoing_data(p)) {
                        ret = api_pipe_request(p);
                }

                return ret;
        }

        return True;
}

/****************************************************************************
 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 already been parsed and stored in p->hdr.
****************************************************************************/

static void process_complete_pdu(pipes_struct *p)
{
        prs_struct rpc_in;
        size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
        char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
        bool reply = False;

        if(p->fault_state) {
                DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
                set_incoming_fault(p);
                setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
                return;
        }

        prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

        /*
         * Ensure we're using the correct endianness for both the
         * RPC header flags and the raw data we will be reading from.
         */

        prs_set_endian_data( &rpc_in, p->endian);
        prs_set_endian_data( &p->in_data.data, p->endian);

        prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

        DEBUG(10,("process_complete_pdu: processing packet type %u\n",
                        (unsigned int)p->hdr.pkt_type ));

        switch (p->hdr.pkt_type) {
                case DCERPC_PKT_REQUEST:
                        reply = process_request_pdu(p, &rpc_in);
                        break;

                case DCERPC_PKT_PING: /* CL request - ignore... */
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "Connectionless packet type %u received on "
                                  "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_RESPONSE: /* No responses here. */
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "DCERPC_PKT_RESPONSE received from client "
                                  "on pipe %s.\n",
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_FAULT:
                case DCERPC_PKT_WORKING:
                        /* CL request - reply to a ping when a call is in process. */
                case DCERPC_PKT_NOCALL:
                        /* CL - server reply to a ping call. */
                case DCERPC_PKT_REJECT:
                case DCERPC_PKT_ACK:
                case DCERPC_PKT_CL_CANCEL:
                case DCERPC_PKT_FACK:
                case DCERPC_PKT_CANCEL_ACK:
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "Connectionless packet type %u received on "
                                  "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_BIND:
                        /*
                         * We assume that a pipe bind is only in one pdu.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_bind_req(p, &rpc_in);
                        }
                        break;

                case DCERPC_PKT_BIND_ACK:
                case DCERPC_PKT_BIND_NAK:
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "DCERPC_PKT_BIND_ACK/DCERPC_PKT_BIND_NAK "
                                  "packet type %u received on pipe %s.\n",
                                  (unsigned int)p->hdr.pkt_type,
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_ALTER:
                        /*
                         * We assume that a pipe bind is only in one pdu.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_alter_context(p, &rpc_in);
                        }
                        break;

                case DCERPC_PKT_ALTER_RESP:
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "DCERPC_PKT_ALTER_RESP on pipe %s: "
                                  "Should only be server -> client.\n",
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_AUTH3:
                        /*
                         * The third packet in an NTLMSSP auth exchange.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_bind_auth3(p, &rpc_in);
                        }
                        break;

                case DCERPC_PKT_SHUTDOWN:
                        DEBUG(0, ("process_complete_pdu: Error. "
                                  "DCERPC_PKT_SHUTDOWN on pipe %s: "
                                  "Should only be server -> client.\n",
                                 get_pipe_name_from_syntax(talloc_tos(),
                                                           &p->syntax)));
                        break;

                case DCERPC_PKT_CO_CANCEL:
                        /* As we never do asynchronous RPC serving, we can
                         * never cancel a call (as far as I know).
                         * If we ever did we'd have to send a cancel_ack reply.
                         * For now, just free all client data and continue
                         * processing. */
                        DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
                                 " Abandoning rpc call.\n"));
                        reply = True;
                        break;
#if 0
                        /* Enable this if we're doing async rpc. */
                        /* We must check the outstanding callid matches. */
                        if(pipe_init_outgoing_data(p)) {
                                /* Send a cancel_ack PDU reply. */
                                /* We should probably check the auth-verifier here. */
                                reply = setup_cancel_ack_reply(p, &rpc_in);
                        }
                        break;
#endif

                case DCERPC_PKT_ORPHANED:
                        /* We should probably check the auth-verifier here.
                         * For now just free all client data and continue
                         * processing. */
                        DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
                                  " Abandoning rpc call.\n"));
                        reply = True;
                        break;

                default:
                        DEBUG(0, ("process_complete_pdu: "
                                  "Unknown rpc type = %u received.\n",
                                  (unsigned int)p->hdr.pkt_type));
                        break;
        }

        /* Reset to little endian.
         * Probably don't need this but it won't hurt. */
        prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

        if (!reply) {
                DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
                         "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
                                                                &p->syntax)));
                set_incoming_fault(p);
                setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
        } else {
                /*
                 * Reset the lengths. We're ready for a new pdu.
                 */
                TALLOC_FREE(p->in_data.current_in_pdu);
                p->in_data.pdu_needed_len = 0;
                p->in_data.pdu_received_len = 0;
        }

        prs_mem_free(&rpc_in);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
        size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
                                        - p->in_data.pdu_received_len);

        DEBUG(10, ("process_incoming_data: Start: pdu_received_len = %u, "
                   "pdu_needed_len = %u, incoming data = %u\n",
                   (unsigned int)p->in_data.pdu_received_len,
                   (unsigned int)p->in_data.pdu_needed_len,
                   (unsigned int)n ));

        if(data_to_copy == 0) {
                /*
                 * This is an error - data is being received and there is no
                 * space in the PDU. Free the received data and go into the
                 * fault state.
                 */
                DEBUG(0, ("process_incoming_data: "
                          "No space in incoming pdu buffer. "
                          "Current size = %u incoming data size = %u\n",
                          (unsigned int)p->in_data.pdu_received_len,
                          (unsigned int)n));
                set_incoming_fault(p);
                return -1;
        }

        /*
         * If we have no data already, wait until we get at least
         * RPC_HEADER_LEN bytes before we can do anything.
         */

        if ((p->in_data.pdu_needed_len == 0) &&
            (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
                /*
                 * Always return here. If we have more data then the RPC_HEADER
                 * will be processed the next time around the loop.
                 */
                return fill_rpc_header(p, data, data_to_copy);
        }

        /*
         * At this point we know we have at least RPC_HEADER_LEN bytes of
         * data stored in current_in_pdu.
         */

        /*
         * If pdu_needed_len is zero this is a new pdu.
         * Unmarshall the header so we know how much more
         * data we need, then loop again.
         */

        if(p->in_data.pdu_needed_len == 0) {
                ssize_t rret = unmarshall_rpc_header(p);
                if (rret == -1 || p->in_data.pdu_needed_len > 0) {
                        return rret;
                }
                /* If rret == 0 and pdu_needed_len == 0 here we have a PDU
                 * that consists of an RPC_HEADER only. This is a
                 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
                 * DCERPC_PKT_ORPHANED pdu type.
                 * Deal with this in process_complete_pdu(). */
        }

        /*
         * Ok - at this point we have a valid RPC_HEADER in p->hdr.
         * Keep reading until we have a full pdu.
         */

        data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

        /*
         * Copy as much of the data as we need into the current_in_pdu buffer.
         * pdu_needed_len becomes zero when we have a complete pdu.
         */

        memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
                data, data_to_copy);
        p->in_data.pdu_received_len += data_to_copy;
        p->in_data.pdu_needed_len -= data_to_copy;

        /*
         * Do we have a complete PDU ?
         * (return the number of bytes handled in the call)
         */

        if(p->in_data.pdu_needed_len == 0) {
                process_complete_pdu(p);
                return data_to_copy;
        }

        DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
                   "pdu_received_len = %u, pdu_needed_len = %u\n",
                   (unsigned int)p->in_data.pdu_received_len,
                   (unsigned int)p->in_data.pdu_needed_len));

        return (ssize_t)data_to_copy;
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
        size_t data_left = n;

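        /*
         * process_incoming_data() consumes at most one PDU's worth of data
         * per call, so keep feeding it until the whole buffer has been
         * accepted or an error has put the pipe into the fault state.
         */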
        while(data_left) {
                ssize_t data_used;

                DEBUG(10, ("write_to_internal_pipe: data_left = %u\n",
                          (unsigned int)data_left));

                data_used = process_incoming_data(p, data, data_left);

                DEBUG(10, ("write_to_internal_pipe: data_used = %d\n",
                           (int)data_used));

                if(data_used < 0) {
                        return -1;
                }

                data_left -= data_used;
                data += data_used;
        }

        return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
                                       size_t n, bool *is_data_outstanding)
{
        uint32 pdu_remaining = 0;
        ssize_t data_returned = 0;

        if (!p) {
                DEBUG(0,("read_from_internal_pipe: pipe not open\n"));
                return -1;
        }

        DEBUG(6,(" name: %s len: %u\n",
                 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                 (unsigned int)n));

        /*
         * We cannot return more than one PDU length per
         * read request.
         */

        /*
         * This condition should result in the connection being closed.
         * Netapp filers seem to set it to 0xffff which results in domain
         * authentications failing.  Just ignore it so things work.
         */

        if(n > RPC_MAX_PDU_FRAG_LEN) {
                DEBUG(5,("read_from_internal_pipe: too large read (%u) "
                         "requested on pipe %s. We can only service %d sized "
                         "reads.\n",
                         (unsigned int)n,
                         get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                         RPC_MAX_PDU_FRAG_LEN ));
                n = RPC_MAX_PDU_FRAG_LEN;
        }

        /*
         * Determine if there is still data to send in the
         * pipe PDU buffer. Always send this first. Never
         * send more than is left in the current PDU. The
         * client should send a new read request for a new
         * PDU.
         */

        pdu_remaining = prs_offset(&p->out_data.frag)
                - p->out_data.current_pdu_sent;

        if (pdu_remaining > 0) {
                data_returned = (ssize_t)MIN(n, pdu_remaining);

                DEBUG(10,("read_from_internal_pipe: %s: current_pdu_len = %u, "
                          "current_pdu_sent = %u returning %d bytes.\n",
                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                          (unsigned int)prs_offset(&p->out_data.frag),
                          (unsigned int)p->out_data.current_pdu_sent,
                          (int)data_returned));

                memcpy(data,
                       prs_data_p(&p->out_data.frag)
                       + p->out_data.current_pdu_sent,
                       data_returned);

                p->out_data.current_pdu_sent += (uint32)data_returned;
                goto out;
        }

        /*
         * At this point p->current_pdu_len == p->current_pdu_sent (which
         * may of course be zero if this is the first return fragment).
         */

        DEBUG(10,("read_from_internal_pipe: %s: fault_state = %d : "
                  "data_sent_length = %u, "
                  "prs_offset(&p->out_data.rdata) = %u.\n",
                  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                  (int)p->fault_state,
                  (unsigned int)p->out_data.data_sent_length,
                  (unsigned int)prs_offset(&p->out_data.rdata) ));

        if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
                /*
                 * We have sent all possible data, return 0.
                 */
                data_returned = 0;
                goto out;
        }

        /*
         * We need to create a new PDU from the data left in p->rdata.
         * Create the header/data/footers. This also sets up the fields
         * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
         * and stores the outgoing PDU in p->current_pdu.
         */

        if(!create_next_pdu(p)) {
                DEBUG(0,("read_from_internal_pipe: %s: create_next_pdu failed.\n",
                         get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
                return -1;
        }

        data_returned = MIN(n, prs_offset(&p->out_data.frag));

        memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
        p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
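        /*
         * Let the caller know whether more PDU data than fits into this
         * read is still pending; the SMB layer can use this to tell the
         * client to issue another read (e.g. via STATUS_BUFFER_OVERFLOW).
         */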
        (*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;

        if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
                /* We've returned everything in the out_data.frag
                 * so we're done with this pdu. Free it and reset
                 * current_pdu_sent. */
                p->out_data.current_pdu_sent = 0;
                prs_mem_free(&p->out_data.frag);

                if (p->out_data.data_sent_length
                    >= prs_offset(&p->out_data.rdata)) {
                        /*
                         * We're completely finished with both outgoing and
                         * incoming data streams. It's safe to free all
                         * temporary data from this request.
                         */
                        free_pipe_context(p);
                }
        }

        return data_returned;
}

bool fsp_is_np(struct files_struct *fsp)
{
        enum FAKE_FILE_TYPE type;

        if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
                return false;
        }

        type = fsp->fake_file_handle->type;

        return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
                || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}

struct np_proxy_state {
        uint16_t file_type;
        uint16_t device_state;
        uint64_t allocation_size;
        struct tstream_context *npipe;
        struct tevent_queue *read_queue;
        struct tevent_queue *write_queue;
};

static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
                                const char *pipe_name,
                                const struct tsocket_address *local_address,
                                const struct tsocket_address *remote_address,
                                struct auth_serversupplied_info *server_info)
{
        struct np_proxy_state *result;
        char *socket_np_dir;
        const char *socket_dir;
        struct tevent_context *ev;
        struct tevent_req *subreq;
        struct netr_SamInfo3 *info3;
        NTSTATUS status;
        bool ok;
        int ret;
        int sys_errno;

        result = talloc(mem_ctx, struct np_proxy_state);
        if (result == NULL) {
                DEBUG(0, ("talloc failed\n"));
                return NULL;
        }

        result->read_queue = tevent_queue_create(result, "np_read");
        if (result->read_queue == NULL) {
                DEBUG(0, ("tevent_queue_create failed\n"));
                goto fail;
        }

        result->write_queue = tevent_queue_create(result, "np_write");
        if (result->write_queue == NULL) {
                DEBUG(0, ("tevent_queue_create failed\n"));
                goto fail;
        }

        ev = s3_tevent_context_init(talloc_tos());
        if (ev == NULL) {
                DEBUG(0, ("s3_tevent_context_init failed\n"));
                goto fail;
        }

        socket_dir = lp_parm_const_string(
                GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
                get_dyn_NCALRPCDIR());
        if (socket_dir == NULL) {
                DEBUG(0, ("external_rpc_pipe:socket_dir not set\n"));
                goto fail;
        }
        socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
        if (socket_np_dir == NULL) {
                DEBUG(0, ("talloc_asprintf failed\n"));
                goto fail;
        }

        info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
        if (info3 == NULL) {
                DEBUG(0, ("talloc failed\n"));
                goto fail;
        }

        status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
        if (!NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(info3);
                DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
                          nt_errstr(status)));
                goto fail;
        }

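        /*
         * The named pipe sockets under socket_np_dir are typically only
         * accessible as root, so do the connect as root.
         */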
        become_root();
        subreq = tstream_npa_connect_send(talloc_tos(), ev,
                                          socket_np_dir,
                                          pipe_name,
                                          remote_address, /* client_addr */
                                          NULL, /* client_name */
                                          local_address, /* server_addr */
                                          NULL, /* server_name */
                                          info3,
                                          server_info->user_session_key,
                                          data_blob_null /* delegated_creds */);
        if (subreq == NULL) {
                unbecome_root();
                DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
                          "user %s\\%s failed\n",
                          socket_np_dir, pipe_name, info3->base.domain.string,
                          info3->base.account_name.string));
                goto fail;
        }
        ok = tevent_req_poll(subreq, ev);
        unbecome_root();
        if (!ok) {
                DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
                          "failed for tstream_npa_connect: %s\n",
                          socket_np_dir, pipe_name, info3->base.domain.string,
                          info3->base.account_name.string,
                          strerror(errno)));
                goto fail;
        }
        ret = tstream_npa_connect_recv(subreq, &sys_errno,
                                       result,
                                       &result->npipe,
                                       &result->file_type,
                                       &result->device_state,
                                       &result->allocation_size);
        TALLOC_FREE(subreq);
        if (ret != 0) {
                DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
                          "user %s\\%s failed: %s\n",
                          socket_np_dir, pipe_name, info3->base.domain.string,
                          info3->base.account_name.string,
                          strerror(sys_errno)));
                goto fail;
        }

        return result;

 fail:
        TALLOC_FREE(result);
        return NULL;
}

NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
                 const struct tsocket_address *local_address,
                 const struct tsocket_address *remote_address,
                 struct auth_serversupplied_info *server_info,
                 struct fake_file_handle **phandle)
{
        const char **proxy_list;
        struct fake_file_handle *handle;

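        /*
         * Pipes listed in the "np:proxy" smb.conf option are forwarded over
         * a named_pipe_auth tstream to an external RPC server; everything
         * else is handled by the in-process implementation below.
         */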
        proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

        handle = talloc(mem_ctx, struct fake_file_handle);
        if (handle == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
                struct np_proxy_state *p;

                p = make_external_rpc_pipe_p(handle, name,
                                             local_address,
                                             remote_address,
                                             server_info);

                handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
                handle->private_data = p;
        } else {
                struct pipes_struct *p;
                struct ndr_syntax_id syntax;
                const char *client_address;

                if (!is_known_pipename(name, &syntax)) {
                        TALLOC_FREE(handle);
                        return NT_STATUS_OBJECT_NAME_NOT_FOUND;
                }

                if (tsocket_address_is_inet(remote_address, "ip")) {
                        client_address = tsocket_address_inet_addr_string(
                                                remote_address,
                                                talloc_tos());
                        if (client_address == NULL) {
                                TALLOC_FREE(handle);
                                return NT_STATUS_NO_MEMORY;
                        }
                } else {
                        client_address = "";
                }

                p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
                                             server_info);

                handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
                handle->private_data = p;
        }

        if (handle->private_data == NULL) {
                TALLOC_FREE(handle);
                return NT_STATUS_PIPE_NOT_AVAILABLE;
        }

        *phandle = handle;

        return NT_STATUS_OK;
}

bool np_read_in_progress(struct fake_file_handle *handle)
{
        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                return false;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                size_t read_count;

                read_count = tevent_queue_length(p->read_queue);
                if (read_count > 0) {
                        return true;
                }

                return false;
        }

        return false;
}

struct np_write_state {
        struct event_context *ev;
        struct np_proxy_state *p;
        struct iovec iov;
        ssize_t nwritten;
};

static void np_write_done(struct tevent_req *subreq);

struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
                                 struct fake_file_handle *handle,
                                 const uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_write_state *state;
        NTSTATUS status;

        DEBUG(6, ("np_write_send: len: %d\n", (int)len));
        dump_data(50, data, len);

        req = tevent_req_create(mem_ctx, &state, struct np_write_state);
        if (req == NULL) {
                return NULL;
        }

        if (len == 0) {
                state->nwritten = 0;
                status = NT_STATUS_OK;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                struct pipes_struct *p = talloc_get_type_abort(
                        handle->private_data, struct pipes_struct);

                state->nwritten = write_to_internal_pipe(p, (char *)data, len);

                status = (state->nwritten >= 0)
                        ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                struct tevent_req *subreq;

                state->ev = ev;
                state->p = p;
                state->iov.iov_base = CONST_DISCARD(void *, data);
                state->iov.iov_len = len;

                subreq = tstream_writev_queue_send(state, ev,
                                                   p->npipe,
                                                   p->write_queue,
                                                   &state->iov, 1);
                if (subreq == NULL) {
                        goto fail;
                }
                tevent_req_set_callback(subreq, np_write_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
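        /*
         * The synchronous paths above complete the request immediately;
         * tevent_req_post() defers the callback to the next event loop
         * iteration so the caller never sees a re-entrant completion.
         */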
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
 fail:
        TALLOC_FREE(req);
        return NULL;
}

static void np_write_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        ssize_t received;
        int err;

        received = tstream_writev_queue_recv(subreq, &err);
        TALLOC_FREE(subreq);
        if (received < 0) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }
        state->nwritten = received;
        tevent_req_done(req);
}

NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
{
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *pnwritten = state->nwritten;
        return NT_STATUS_OK;
}

struct np_ipc_readv_next_vector_state {
        uint8_t *buf;
        size_t len;
        off_t ofs;
        size_t remaining;
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
                                          uint8_t *buf, size_t len)
{
        ZERO_STRUCTP(s);

        s->buf = buf;
        s->len = MIN(len, UINT16_MAX);
}

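/*
 * Provide the next iovec for tstream_readv_pdu: to mimic the message-mode
 * read semantics of a named pipe we hand back data as soon as some is
 * available instead of blocking for the full buffer, and we record in
 * 'remaining' how much more is pending so np_read_done() can report
 * outstanding data.
 */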
static int np_ipc_readv_next_vector(struct tstream_context *stream,
                                    void *private_data,
                                    TALLOC_CTX *mem_ctx,
                                    struct iovec **_vector,
                                    size_t *count)
{
        struct np_ipc_readv_next_vector_state *state =
                (struct np_ipc_readv_next_vector_state *)private_data;
        struct iovec *vector;
        ssize_t pending;
        size_t wanted;

        if (state->ofs == state->len) {
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        pending = tstream_pending_bytes(stream);
        if (pending == -1) {
                return -1;
        }

        if (pending == 0 && state->ofs != 0) {
                /* return a short read */
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        if (pending == 0) {
                /* we want at least one byte and recheck again */
                wanted = 1;
        } else {
                size_t missing = state->len - state->ofs;
                if (pending > missing) {
                        /* there's more available */
                        state->remaining = pending - missing;
                        wanted = missing;
                } else {
                        /* read what we can get and recheck in the next cycle */
                        wanted = pending;
                }
        }

        vector = talloc_array(mem_ctx, struct iovec, 1);
        if (!vector) {
                return -1;
        }

        vector[0].iov_base = state->buf + state->ofs;
        vector[0].iov_len = wanted;

        state->ofs += wanted;

        *_vector = vector;
        *count = 1;
        return 0;
}

struct np_read_state {
        struct np_proxy_state *p;
        struct np_ipc_readv_next_vector_state next_vector;

        ssize_t nread;
        bool is_data_outstanding;
};

static void np_read_done(struct tevent_req *subreq);

struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
                                struct fake_file_handle *handle,
                                uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_read_state *state;
        NTSTATUS status;

        req = tevent_req_create(mem_ctx, &state, struct np_read_state);
        if (req == NULL) {
                return NULL;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                struct pipes_struct *p = talloc_get_type_abort(
                        handle->private_data, struct pipes_struct);

                state->nread = read_from_internal_pipe(
                        p, (char *)data, len, &state->is_data_outstanding);

                status = (state->nread >= 0)
                        ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                struct tevent_req *subreq;

                np_ipc_readv_next_vector_init(&state->next_vector,
                                              data, len);

                subreq = tstream_readv_pdu_queue_send(state,
                                                      ev,
                                                      p->npipe,
                                                      p->read_queue,
                                                      np_ipc_readv_next_vector,
                                                      &state->next_vector);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                tevent_req_set_callback(subreq, np_read_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
}

static void np_read_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        ssize_t ret;
        int err;

        ret = tstream_readv_pdu_queue_recv(subreq, &err);
        TALLOC_FREE(subreq);
        if (ret == -1) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }

        state->nread = ret;
        state->is_data_outstanding = (state->next_vector.remaining > 0);

        tevent_req_done(req);
        return;
}

NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
                      bool *is_data_outstanding)
{
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *nread = state->nread;
        *is_data_outstanding = state->is_data_outstanding;
        return NT_STATUS_OK;
}

/**
 * @brief Create a new RPC client context which uses a local dispatch function.
 *
 * @param[in]  conn  The connection struct that will hold the pipe
 *
 * @param[out] spoolss_pipe  A pointer to the connected rpc client pipe.
 *
 * @return              NT_STATUS_OK on success, a corresponding NT status if an
 *                      error occurred.
 */
NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
                                  struct rpc_pipe_client **spoolss_pipe)
{
        NTSTATUS status;

        /* TODO: check and handle disconnections */

        if (!conn->spoolss_pipe) {
                status = rpc_pipe_open_internal(conn,
                                                &ndr_table_spoolss.syntax_id,
                                                conn->server_info,
                                                &conn->spoolss_pipe);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
        }

        *spoolss_pipe = conn->spoolss_pipe;
        return NT_STATUS_OK;
}