Make the internal pipe functions static
source3/rpc_server/srv_pipe_hnd.c
/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell              1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison                1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

static int pipes_open;

/*
 * Sometimes I can't decide if I hate Windows printer driver
 * writers more than I hate the Windows spooler service driver
 * writers. This gets around a combination of bugs in the spooler
 * and the HP 8500 PCL driver that causes a spooler spin. JRA.
 *
 * bumped up from 20 -> 64 after viewing traffic from WordPerfect
 * 2002 running on NT 4.0 SP6
 * bumped up from 64 -> 256 after viewing traffic from con2prt
 * for lots of printers on a WinNT 4.x SP6 box.
 */

#ifndef MAX_OPEN_SPOOLSS_PIPES
#define MAX_OPEN_SPOOLSS_PIPES 256
#endif
static int current_spoolss_pipes_open;

static pipes_struct *InternalPipes;
static struct bitmap *bmap;

/* TODO
 * the following prototypes are declared here to avoid
 * code being moved about too much for a patch to be
 * disrupted / less obvious.
 *
 * these functions, and associated functions that they
 * call, should be moved behind a .so module-loading
 * system _anyway_.  so that's the next step...
 */

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);

/****************************************************************************
 Internal Pipe iterator functions.
****************************************************************************/

pipes_struct *get_first_internal_pipe(void)
{
	return InternalPipes;
}

pipes_struct *get_next_internal_pipe(pipes_struct *p)
{
	return p->next;
}

/* this must be larger than the sum of the open files and directories */
static int pipe_handle_offset;

/****************************************************************************
 Set the pipe_handle_offset. Called from smbd/files.c
****************************************************************************/

void set_pipe_handle_offset(int max_open_files)
{
	if (max_open_files < 0x7000) {
		pipe_handle_offset = 0x7000;
	} else {
		pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
	}
}
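
/*
 * Worked example: with "max open files = 10000" the offset becomes 10010,
 * so any pipe number based on this offset sits safely above every fnum
 * smbd can hand out for a real file or directory.
 */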

/****************************************************************************
 Initialise pipe handle states.
****************************************************************************/

void init_rpc_pipe_hnd(void)
{
	bmap = bitmap_allocate(MAX_OPEN_PIPES);
	if (!bmap) {
		exit_server("out of memory in init_rpc_pipe_hnd");
	}
}

/****************************************************************************
 Initialise an outgoing packet.
****************************************************************************/

static bool pipe_init_outgoing_data(pipes_struct *p)
{
	output_data *o_data = &p->out_data;

	/* Reset the offset counters. */
	o_data->data_sent_length = 0;
	o_data->current_pdu_len = 0;
	o_data->current_pdu_sent = 0;

	memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));

	/* Free any memory in the current return data buffer. */
	prs_mem_free(&o_data->rdata);

	/*
	 * Initialize the outgoing RPC data buffer.
	 * We will use this as the raw data area for replying to rpc requests.
	 */
	if (!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
		return False;
	}

	return True;
}
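
/*
 * Output flows in two stages: the full marshalled response accumulates in
 * out_data.rdata, and create_next_pdu() later carves it into header-framed
 * fragments in out_data.current_pdu, which read_from_internal_pipe() hands
 * back to the client one read at a time.
 */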

/****************************************************************************
 Make an internal namedpipes structure
****************************************************************************/

static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
						     const char *pipe_name,
						     const char *client_address,
						     struct auth_serversupplied_info *server_info,
						     uint16_t vuid)
{
	pipes_struct *p;

	DEBUG(4,("Create pipe requested %s\n", pipe_name));

	p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);

	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
		DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	if (!init_pipe_handle_list(p, pipe_name)) {
		DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
		talloc_destroy(p->mem_ctx);
		TALLOC_FREE(p);
		return NULL;
	}

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSHALLING before processing the stream.
	 */

	if (!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	p->server_info = copy_serverinfo(p, server_info);
	if (p->server_info == NULL) {
		DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	DLIST_ADD(InternalPipes, p);

	memcpy(p->client_address, client_address, sizeof(p->client_address));

	p->endian = RPC_LITTLE_ENDIAN;

	ZERO_STRUCT(p->pipe_user);

	p->pipe_user.vuid = vuid;
	p->pipe_user.ut.uid = (uid_t)-1;
	p->pipe_user.ut.gid = (gid_t)-1;
	p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);

	/*
	 * Initialize the outgoing RPC data buffer with no memory.
	 */
	prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);

	fstrcpy(p->name, pipe_name);

	DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

	return p;
}
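
/*
 * Cleanup is driven by the talloc destructor set above: freeing p (or the
 * fake_file_handle that owns it in np_open() below) runs
 * close_internal_rpc_pipe_hnd() and tears the pipe down.
 */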

/****************************************************************************
 Sets the fault state on incoming packets.
****************************************************************************/

static void set_incoming_fault(pipes_struct *p)
{
	prs_mem_free(&p->in_data.data);
	p->in_data.pdu_needed_len = 0;
	p->in_data.pdu_received_len = 0;
	p->fault_state = True;
	DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
		   p->name));
}

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
{
	size_t len_needed_to_complete_hdr =
		MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
			(unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
			(unsigned int)p->in_data.pdu_received_len ));

	memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
	p->in_data.pdu_received_len += len_needed_to_complete_hdr;

	return (ssize_t)len_needed_to_complete_hdr;
}
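
/*
 * Worked example: with 10 header bytes already buffered and a 20 byte
 * write arriving, only MIN(20, 16 - 10) = 6 bytes are consumed here
 * (RPC_HEADER_LEN is 16); the caller loops and feeds the remaining 14
 * bytes back in once the header has been unmarshalled.
 */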

/****************************************************************************
 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
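 The 16 byte common header carries the version (major.minor), packet type,
 fragment flags, data representation, frag_len, auth_len and call_id; the
 frag_len field is what tells us how much more data to expect.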
****************************************************************************/

static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
	/*
	 * Unmarshall the header to determine the needed length.
	 */

	prs_struct rpc_in;

	if (p->in_data.pdu_received_len != RPC_HEADER_LEN) {
		DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
		set_incoming_fault(p);
		return -1;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
	prs_set_endian_data( &rpc_in, p->endian);

	prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
					p->in_data.pdu_received_len, False);

	/*
	 * Unmarshall the header as this will tell us how much
	 * data we need to read to get the complete pdu.
	 * This also sets the endian flag in rpc_in.
	 */

	if (!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
		DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * Validate the RPC header. We only speak DCE/RPC version 5.0.
	 */

	if (p->hdr.major != 5 || p->hdr.minor != 0) {
		DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * If there's no data in the incoming buffer this should be the start of a new RPC.
	 */

	if (prs_offset(&p->in_data.data) == 0) {

		/*
		 * AS/U doesn't set FIRST flag in a BIND packet it seems.
		 */

		if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
			/*
			 * Ensure that the FIRST flag is set. If not then we have
			 * a stream mismatch.
			 */

			DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}

		/*
		 * If this is the first PDU then set the endianness
		 * flag in the pipe. We will need this when parsing all
		 * data in this RPC.
		 */

		p->endian = rpc_in.bigendian_data;

		DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
				p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

	} else {

		/*
		 * If this is *NOT* the first PDU then check the endianness
		 * flag in the pipe is the same as that in the PDU.
		 */

		if (p->endian != rpc_in.bigendian_data) {
			DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}
	}

	/*
	 * Ensure that the pdu length is sane.
	 */

	if ((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
			(unsigned int)p->hdr.flags ));

	p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

	prs_mem_free(&rpc_in);

	return 0; /* No extra data processed. */
}

/****************************************************************************
 Call this to free any talloc'ed memory. Do this before and after processing
 a complete PDU.
****************************************************************************/

static void free_pipe_context(pipes_struct *p)
{
	if (p->mem_ctx) {
		DEBUG(3,("free_pipe_context: destroying talloc pool of size "
			 "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
		talloc_free_children(p->mem_ctx);
	} else {
		p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
		if (p->mem_ctx == NULL) {
			p->fault_state = True;
		}
	}
}

/****************************************************************************
 Processes a request pdu. This will do auth processing if needed, and
 appends the data into the complete stream if the LAST flag is not set.
****************************************************************************/

static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
				(p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

	if (!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Read the RPC request header.
	 */

	if (!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if (!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
				DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
				DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
				DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
	 */

	if (prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
		DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
				(unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if (!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
		DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
				(unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
		set_incoming_fault(p);
		return False;
	}

	if (p->hdr.flags & RPC_FLG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if (!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
			DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		free_pipe_context(p);

		if (pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU. Not sure which
		 * is best here.... JRA.
		 */

		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
		return ret;
	}

	return True;
}
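
/*
 * Example: a request split across three fragments arrives as
 * FIRST, <no flags>, LAST. The first two passes just append the
 * stub data; the dispatch through api_pipe_request() only happens
 * when the LAST fragment completes the stream.
 */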

/****************************************************************************
 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 already been parsed and stored in p->hdr.
****************************************************************************/

static void process_complete_pdu(pipes_struct *p)
{
	prs_struct rpc_in;
	size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
	char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
	bool reply = False;

	if (p->fault_state) {
		DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
			p->name ));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		return;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

	/*
	 * Ensure we're using the correct endianness for both the
	 * RPC header flags and the raw data we will be reading from.
	 */

	prs_set_endian_data( &rpc_in, p->endian);
	prs_set_endian_data( &p->in_data.data, p->endian);

	prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

	DEBUG(10,("process_complete_pdu: processing packet type %u\n",
			(unsigned int)p->hdr.pkt_type ));

	switch (p->hdr.pkt_type) {
		case RPC_REQUEST:
			reply = process_request_pdu(p, &rpc_in);
			break;

		case RPC_PING: /* CL request - ignore... */
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_RESPONSE: /* No responses here. */
			DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
				p->name ));
			break;

		case RPC_FAULT:
		case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
		case RPC_NOCALL: /* CL - server reply to a ping call. */
		case RPC_REJECT:
		case RPC_ACK:
		case RPC_CL_CANCEL:
		case RPC_FACK:
		case RPC_CANCEL_ACK:
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_BIND:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if (pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_req(p, &rpc_in);
			}
			break;

		case RPC_BINDACK:
		case RPC_BINDNACK:
			DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_ALTCONT:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if (pipe_init_outgoing_data(p)) {
				reply = api_pipe_alter_context(p, &rpc_in);
			}
			break;

		case RPC_ALTCONTRESP:
			DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
				p->name));
			break;

		case RPC_AUTH3:
			/*
			 * The third packet in an NTLMSSP auth exchange.
			 */
			if (pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_auth3(p, &rpc_in);
			}
			break;

		case RPC_SHUTDOWN:
			DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
				p->name));
			break;

		case RPC_CO_CANCEL:
			/* For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
			/* As we never do asynchronous RPC serving, we can never cancel a
			   call (as far as I know). If we ever did we'd have to send a cancel_ack
			   reply. For now, just free all client data and continue processing. */
			reply = True;
			break;
#if 0
			/* Enable this if we're doing async rpc. */
			/* We must check the call-id matches the outstanding callid. */
			if (pipe_init_outgoing_data(p)) {
				/* Send a cancel_ack PDU reply. */
				/* We should probably check the auth-verifier here. */
				reply = setup_cancel_ack_reply(p, &rpc_in);
			}
			break;
#endif

		case RPC_ORPHANED:
			/* We should probably check the auth-verifier here.
			   For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
			reply = True;
			break;

		default:
			DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
			break;
	}

	/* Reset to little endian. Probably don't need this but it won't hurt. */
	prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

	if (!reply) {
		DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->name));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
	} else {
		/*
		 * Reset the lengths. We're ready for a new pdu.
		 */
		p->in_data.pdu_needed_len = 0;
		p->in_data.pdu_received_len = 0;
	}

	prs_mem_free(&rpc_in);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
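 The incoming byte stream is consumed as a small state machine:
   1) buffer bytes until a full 16 byte RPC header is in current_in_pdu,
   2) unmarshall the header to learn frag_len and thus pdu_needed_len,
   3) buffer body bytes until pdu_needed_len reaches zero, then hand the
      complete fragment to process_complete_pdu().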
****************************************************************************/

static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
		(unsigned int)n ));

	if (data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the fault state.
		 */
		DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least
	 * RPC_HEADER_LEN bytes before we can do anything.
	 */

	if ((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of data
	 * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */

	if (p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
			return rret;
		}
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
		   of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
		   pdu type. Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if (p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

	return (ssize_t)data_to_copy;
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
	size_t data_left = n;

	while (data_left) {
		ssize_t data_used;

		DEBUG(10,("write_to_internal_pipe: data_left = %u\n", (unsigned int)data_left ));

		data_used = process_incoming_data(p, data, data_left);

		DEBUG(10,("write_to_internal_pipe: data_used = %d\n", (int)data_used ));

		if (data_used < 0) {
			return -1;
		}

		data_left -= data_used;
		data += data_used;
	}

	return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
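 On return, *is_data_outstanding tells the SMB layer whether more of the
 current PDU remains to be read, so the client knows to issue another read.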
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
				       bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_internal_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	if (n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_internal_pipe: too large read (%u) requested on \
pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	if ((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_internal_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
			(unsigned int)p->out_data.current_pdu_sent, (int)data_returned));

		memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment).
	 */

	DEBUG(10,("read_from_internal_pipe: %s: fault_state = %d : data_sent_length \
= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));

	if (p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if (!create_next_pdu(p)) {
		DEBUG(0,("read_from_internal_pipe: %s: create_next_pdu failed.\n", p->name));
		return -1;
	}

	data_returned = MIN(n, p->out_data.current_pdu_len);

	memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:

	(*is_data_outstanding) = p->out_data.current_pdu_len > n;
	return data_returned;
}

/****************************************************************************
 Close an rpc pipe.
****************************************************************************/

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
{
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	if (p->mem_ctx) {
		talloc_destroy(p->mem_ctx);
	}

	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	TALLOC_FREE(p->pipe_user.nt_user_token);
	SAFE_FREE(p->pipe_user.ut.groups);

	DLIST_REMOVE(InternalPipes, p);

	ZERO_STRUCTP(p);

	/* p itself is freed by talloc when this destructor returns -
	   we must not free it here. */

	return True;
}

bool fsp_is_np(struct files_struct *fsp)
{
	return ((fsp != NULL)
		&& (fsp->fake_file_handle != NULL)
		&& (fsp->fake_file_handle->type == FAKE_FILE_TYPE_NAMED_PIPE));
}

NTSTATUS np_open(struct smb_request *smb_req, struct connection_struct *conn,
		 const char *name, struct files_struct **pfsp)
{
	NTSTATUS status;
	struct files_struct *fsp;
	struct pipes_struct *p;

	status = file_new(smb_req, conn, &fsp);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("file_new failed: %s\n", nt_errstr(status)));
		return status;
	}

	fsp->conn = conn;
	fsp->fh->fd = -1;
	fsp->vuid = smb_req->vuid;
	fsp->can_lock = false;
	fsp->access_mask = FILE_READ_DATA | FILE_WRITE_DATA;
	string_set(&fsp->fsp_name, name);

	fsp->fake_file_handle = talloc(NULL, struct fake_file_handle);
	if (fsp->fake_file_handle == NULL) {
		file_free(smb_req, fsp);
		return NT_STATUS_NO_MEMORY;
	}
	fsp->fake_file_handle->type = FAKE_FILE_TYPE_NAMED_PIPE;

	p = make_internal_rpc_pipe_p(fsp->fake_file_handle, name,
				     conn->client_address, conn->server_info,
				     smb_req->vuid);
	if (p == NULL) {
		file_free(smb_req, fsp);
		return NT_STATUS_PIPE_NOT_AVAILABLE;
	}
	fsp->fake_file_handle->private_data = p;

	*pfsp = fsp;

	return NT_STATUS_OK;
}

NTSTATUS np_write(struct files_struct *fsp, uint8_t *data, size_t len,
		  ssize_t *nwritten)
{
	struct pipes_struct *p;

	if (!fsp_is_np(fsp)) {
		return NT_STATUS_INVALID_HANDLE;
	}

	p = talloc_get_type_abort(
		fsp->fake_file_handle->private_data, struct pipes_struct);

	DEBUG(6, ("np_write: %x name: %s len: %d\n", (int)fsp->fnum,
		  fsp->fsp_name, (int)len));
	dump_data(50, data, len);

	*nwritten = write_to_internal_pipe(p, (char *)data, len);

	return ((*nwritten) >= 0)
		? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
}

NTSTATUS np_read(struct files_struct *fsp, uint8_t *data, size_t len,
		 ssize_t *nread, bool *is_data_outstanding)
{
	struct pipes_struct *p;

	if (!fsp_is_np(fsp)) {
		return NT_STATUS_INVALID_HANDLE;
	}

	p = talloc_get_type_abort(
		fsp->fake_file_handle->private_data, struct pipes_struct);

	*nread = read_from_internal_pipe(p, (char *)data, len,
					 is_data_outstanding);

	return ((*nread) >= 0)
		? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
}
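
/*
 * A minimal sketch of how smbd drives this API for a transact on an
 * internal pipe (error handling elided; "req", "conn" and the buffers
 * stand in for whatever the SMB layer already has in hand):
 *
 *	struct files_struct *fsp;
 *	ssize_t nwritten, nread;
 *	bool more;
 *
 *	np_open(req, conn, "lsarpc", &fsp);
 *	np_write(fsp, in_pdu, in_len, &nwritten);      // request PDU(s) in
 *	np_read(fsp, out_buf, out_len, &nread, &more); // response PDU out
 *	// loop on np_read while 'more' reports outstanding PDU data
 */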