[GLUE] Rsync SAMBA_3_2_0 SVN r25598 in order to create the v3-2-test branch.
[samba.git] / source / rpc_server / srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
26
/* Every SMB named-pipe open arrives as "\PIPE\<name>"; PIPELEN is the
 * length of that prefix so callers can strip it. */
#define PIPE            "\\PIPE\\"
#define PIPELEN         strlen(PIPE)

/* Pipe opened most recently in the current SMB request chain. */
static smb_np_struct *chain_p;
/* Number of SMB-level pipes currently open (used in debug output). */
static int pipes_open;

/*
 * Sometimes I can't decide if I hate Windows printer driver
 * writers more than I hate the Windows spooler service driver
 * writers. This gets around a combination of bugs in the spooler
 * and the HP 8500 PCL driver that causes a spooler spin. JRA.
 *
 * bumped up from 20 -> 64 after viewing traffic from WordPerfect
 * 2002 running on NT 4.- SP6
 * bumped up from 64 -> 256 after viewing traffic from con2prt
 * for lots of printers on a WinNT 4.x SP6 box.
 */
 
#ifndef MAX_OPEN_SPOOLSS_PIPES
#define MAX_OPEN_SPOOLSS_PIPES 256
#endif
/* Open spoolss pipe count, compared against the cap above.
 * NOTE(review): nothing visible in this file increments or decrements
 * it - confirm the accounting happens elsewhere (e.g. on pipe close). */
static int current_spoolss_pipes_open;

/* List of all SMB-level pipe handles. */
static smb_np_struct *Pipes;
/* List of all in-process (internal) RPC pipe states. */
static pipes_struct *InternalPipes;
/* Bitmap of allocated pipe numbers (MAX_OPEN_PIPES bits). */
static struct bitmap *bmap;

/* TODO
 * the following prototypes are declared here to avoid
 * code being moved about too much for a patch to be
 * disrupted / less obvious.
 *
 * these functions, and associated functions that they
 * call, should be moved behind a .so module-loading
 * system _anyway_.  so that's the next step...
 */

static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
		BOOL *is_data_outstanding);
static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n);
static BOOL close_internal_rpc_pipe_hnd(void *np_conn);
static void *make_internal_rpc_pipe_p(const char *pipe_name, 
			      connection_struct *conn, uint16 vuid);
70
71 /****************************************************************************
72  Pipe iterator functions.
73 ****************************************************************************/
74
/* Return the head of the SMB-level pipe list (start of iteration). */
smb_np_struct *get_first_pipe(void)
{
	return Pipes;
}
79
80 smb_np_struct *get_next_pipe(smb_np_struct *p)
81 {
82         return p->next;
83 }
84
85 /****************************************************************************
86  Internal Pipe iterator functions.
87 ****************************************************************************/
88
/* Return the head of the internal (in-process) pipe list. */
pipes_struct *get_first_internal_pipe(void)
{
	return InternalPipes;
}
93
94 pipes_struct *get_next_internal_pipe(pipes_struct *p)
95 {
96         return p->next;
97 }
98
/* Base added to every pipe number so pipe handles never collide with
 * file/directory handles: this must be larger than the sum of the open
 * files and directories. Set via set_pipe_handle_offset() below. */
static int pipe_handle_offset;
101
102 /****************************************************************************
103  Set the pipe_handle_offset. Called from smbd/files.c
104 ****************************************************************************/
105
106 void set_pipe_handle_offset(int max_open_files)
107 {
108         if(max_open_files < 0x7000) {
109                 pipe_handle_offset = 0x7000;
110         } else {
111                 pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
112         }
113 }
114
115 /****************************************************************************
116  Reset pipe chain handle number.
117 ****************************************************************************/
118
/* Forget the chained pipe pointer; called at the start of each SMB
 * request chain so stale pipe handles are never reused. */
void reset_chain_p(void)
{
	chain_p = NULL;
}
123
124 /****************************************************************************
125  Initialise pipe handle states.
126 ****************************************************************************/
127
/* Allocate the pipe-number bitmap. Must run once at startup before any
 * pipe is opened; allocation failure is fatal and exits the server. */
void init_rpc_pipe_hnd(void)
{
	bmap = bitmap_allocate(MAX_OPEN_PIPES);
	if (!bmap) {
		exit_server("out of memory in init_rpc_pipe_hnd");
	}
}
135
136 /****************************************************************************
137  Initialise an outgoing packet.
138 ****************************************************************************/
139
140 static BOOL pipe_init_outgoing_data(pipes_struct *p)
141 {
142         output_data *o_data = &p->out_data;
143
144         /* Reset the offset counters. */
145         o_data->data_sent_length = 0;
146         o_data->current_pdu_len = 0;
147         o_data->current_pdu_sent = 0;
148
149         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
150
151         /* Free any memory in the current return data buffer. */
152         prs_mem_free(&o_data->rdata);
153
154         /*
155          * Initialize the outgoing RPC data buffer.
156          * we will use this as the raw data area for replying to rpc requests.
157          */     
158         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
159                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
160                 return False;
161         }
162
163         return True;
164 }
165
166 /****************************************************************************
167  Find first available pipe slot.
168 ****************************************************************************/
169
/* Allocate an SMB-level pipe handle for pipe_name on connection conn,
 * creating the backing internal RPC pipe state. Returns the new handle
 * (also stored in chain_p), or NULL on denial / resource exhaustion. */
smb_np_struct *open_rpc_pipe_p(const char *pipe_name, 
			      connection_struct *conn, uint16 vuid)
{
	int i;
	smb_np_struct *p, *p_it;
	static int next_pipe;
	BOOL is_spoolss_pipe = False;

	DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	if (strstr(pipe_name, "spoolss")) {
		is_spoolss_pipe = True;
	}
 
	/* Workaround for the spooler/HP-driver spin described at the top
	 * of this file: refuse spoolss opens beyond the cap. */
	if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
		DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
			pipe_name ));
		return NULL;
	}

	/* not repeating pipe numbers makes it easier to track things in 
	   log files and prevents client bugs where pipe numbers are reused
	   over connection restarts */

	if (next_pipe == 0) {
		/* Seed the search point pseudo-randomly on first use. */
		next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
	}

	i = bitmap_find(bmap, next_pipe);

	if (i == -1) {
		DEBUG(0,("ERROR! Out of pipe structures\n"));
		return NULL;
	}

	next_pipe = (i+1) % MAX_OPEN_PIPES;

	/* Debug aid: list all currently open pipes. */
	for (p = Pipes; p; p = p->next) {
		DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));  
	}

	p = SMB_MALLOC_P(smb_np_struct);
	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	ZERO_STRUCTP(p);

	/* add a dso mechanism instead of this, here */

	p->namedpipe_create = make_internal_rpc_pipe_p;
	p->namedpipe_read = read_from_internal_pipe;
	p->namedpipe_write = write_to_internal_pipe;
	p->namedpipe_close = close_internal_rpc_pipe_hnd;

	p->np_state = p->namedpipe_create(pipe_name, conn, vuid);

	if (p->np_state == NULL) {
		DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
		SAFE_FREE(p);
		return NULL;
	}

	DLIST_ADD(Pipes, p);

	/* Claim the pipe number and bias it above the file/directory
	 * handle range (see set_pipe_handle_offset()). NOTE(review): the
	 * previous comment here about initialising the incoming data
	 * buffer was stale - that happens in make_internal_rpc_pipe_p(). */
	bitmap_set(bmap, i);
	i += pipe_handle_offset;

	pipes_open++;

	p->pnum = i;

	p->open = True;
	p->device_state = 0;
	p->priority = 0;
	p->conn = conn;
	p->vuid  = vuid;

	p->max_trans_reply = 0;
	
	fstrcpy(p->name, pipe_name);
	
	DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
		 pipe_name, i, pipes_open));
	
	chain_p = p;
	
	/* Iterate over p_it as a temp variable, to display all open pipes */ 
	for (p_it = Pipes; p_it; p_it = p_it->next) {
		DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));  
	}

	return chain_p;
}
273
274 /****************************************************************************
275  Make an internal namedpipes structure
276 ****************************************************************************/
277
/* Create the internal (in-process) RPC pipe state backing an SMB pipe
 * handle. Returns a pipes_struct cast to void* (stored in np_state by
 * the caller), or NULL on failure; every error path releases everything
 * acquired up to that point. */
static void *make_internal_rpc_pipe_p(const char *pipe_name, 
			      connection_struct *conn, uint16 vuid)
{
	pipes_struct *p;
	user_struct *vuser = get_valid_user_struct(vuid);

	DEBUG(4,("Create pipe requested %s\n", pipe_name));

	/* A missing vuser is only acceptable for the anonymous vuid. */
	if (!vuser && vuid != UID_FIELD_INVALID) {
		DEBUG(0,("ERROR! vuid %d did not map to a valid vuser struct!\n", vuid));
		return NULL;
	}

	p = SMB_MALLOC_P(pipes_struct);

	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	ZERO_STRUCTP(p);

	/* Per-call talloc pool; freed between PDUs by free_pipe_context(). */
	if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
		DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
		SAFE_FREE(p);
		return NULL;
	}

	/* Longer-lived pool for per-pipe state that must survive calls. */
	if ((p->pipe_state_mem_ctx = talloc_init("pipe_state %s %p", pipe_name, p)) == NULL) {
		DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
		talloc_destroy(p->mem_ctx);
		SAFE_FREE(p);
		return NULL;
	}

	if (!init_pipe_handle_list(p, pipe_name)) {
		DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
		talloc_destroy(p->mem_ctx);
		talloc_destroy(p->pipe_state_mem_ctx);
		SAFE_FREE(p);
		return NULL;
	}

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSALLING before processing the stream.
	 */

	if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
		talloc_destroy(p->mem_ctx);
		talloc_destroy(p->pipe_state_mem_ctx);
		close_policy_by_pipe(p);
		SAFE_FREE(p);
		return NULL;
	}

	DLIST_ADD(InternalPipes, p);

	p->conn = conn;

	p->vuid  = vuid;

	p->endian = RPC_LITTLE_ENDIAN;

	ZERO_STRUCT(p->pipe_user);

	/* -1 markers mean "no impersonation set up yet". */
	p->pipe_user.ut.uid = (uid_t)-1;
	p->pipe_user.ut.gid = (gid_t)-1;
	
	/* Store the session key and NT_TOKEN */
	if (vuser) {
		p->session_key = data_blob(vuser->session_key.data, vuser->session_key.length);
		p->pipe_user.nt_user_token = dup_nt_token(
			NULL, vuser->nt_user_token);
	}

	/*
	 * Initialize the outgoing RPC data buffer with no memory.
	 */	
	prs_init(&p->out_data.rdata, 0, p->mem_ctx, MARSHALL);
	
	fstrcpy(p->name, pipe_name);
	
	DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	return (void*)p;
}
369
370 /****************************************************************************
371  Sets the fault state on incoming packets.
372 ****************************************************************************/
373
374 static void set_incoming_fault(pipes_struct *p)
375 {
376         prs_mem_free(&p->in_data.data);
377         p->in_data.pdu_needed_len = 0;
378         p->in_data.pdu_received_len = 0;
379         p->fault_state = True;
380         DEBUG(10,("set_incoming_fault: Setting fault state on pipe %s : vuid = 0x%x\n",
381                 p->name, p->vuid ));
382 }
383
384 /****************************************************************************
385  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
386 ****************************************************************************/
387
388 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
389 {
390         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
391
392         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
393                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
394                         (unsigned int)p->in_data.pdu_received_len ));
395
396         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
397         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
398
399         return (ssize_t)len_needed_to_complete_hdr;
400 }
401
402 /****************************************************************************
403  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
404 ****************************************************************************/
405
406 static ssize_t unmarshall_rpc_header(pipes_struct *p)
407 {
408         /*
409          * Unmarshall the header to determine the needed length.
410          */
411
412         prs_struct rpc_in;
413
414         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
415                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
416                 set_incoming_fault(p);
417                 return -1;
418         }
419
420         prs_init( &rpc_in, 0, p->mem_ctx, UNMARSHALL);
421         prs_set_endian_data( &rpc_in, p->endian);
422
423         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
424                                         p->in_data.pdu_received_len, False);
425
426         /*
427          * Unmarshall the header as this will tell us how much
428          * data we need to read to get the complete pdu.
429          * This also sets the endian flag in rpc_in.
430          */
431
432         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
433                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
434                 set_incoming_fault(p);
435                 prs_mem_free(&rpc_in);
436                 return -1;
437         }
438
439         /*
440          * Validate the RPC header.
441          */
442
443         if(p->hdr.major != 5 && p->hdr.minor != 0) {
444                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
445                 set_incoming_fault(p);
446                 prs_mem_free(&rpc_in);
447                 return -1;
448         }
449
450         /*
451          * If there's not data in the incoming buffer this should be the start of a new RPC.
452          */
453
454         if(prs_offset(&p->in_data.data) == 0) {
455
456                 /*
457                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
458                  */
459
460                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
461                         /*
462                          * Ensure that the FIRST flag is set. If not then we have
463                          * a stream missmatch.
464                          */
465
466                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
467                         set_incoming_fault(p);
468                         prs_mem_free(&rpc_in);
469                         return -1;
470                 }
471
472                 /*
473                  * If this is the first PDU then set the endianness
474                  * flag in the pipe. We will need this when parsing all
475                  * data in this RPC.
476                  */
477
478                 p->endian = rpc_in.bigendian_data;
479
480                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
481                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
482
483         } else {
484
485                 /*
486                  * If this is *NOT* the first PDU then check the endianness
487                  * flag in the pipe is the same as that in the PDU.
488                  */
489
490                 if (p->endian != rpc_in.bigendian_data) {
491                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
492                         set_incoming_fault(p);
493                         prs_mem_free(&rpc_in);
494                         return -1;
495                 }
496         }
497
498         /*
499          * Ensure that the pdu length is sane.
500          */
501
502         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
503                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
504                 set_incoming_fault(p);
505                 prs_mem_free(&rpc_in);
506                 return -1;
507         }
508
509         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
510                         (unsigned int)p->hdr.flags ));
511
512         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
513
514         prs_mem_free(&rpc_in);
515
516         return 0; /* No extra data processed. */
517 }
518
519 /****************************************************************************
520  Call this to free any talloc'ed memory. Do this before and after processing
521  a complete PDU.
522 ****************************************************************************/
523
524 static void free_pipe_context(pipes_struct *p)
525 {
526         if (p->mem_ctx) {
527                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
528                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
529                 talloc_free_children(p->mem_ctx);
530         } else {
531                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
532                 if (p->mem_ctx == NULL) {
533                         p->fault_state = True;
534                 }
535         }
536 }
537
538 /****************************************************************************
539  Processes a request pdu. This will do auth processing if needed, and
540  appends the data into the complete stream if the LAST flag is not set.
541 ****************************************************************************/
542
543 static BOOL process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
544 {
545         uint32 ss_padding_len = 0;
546         size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
547                                 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
548
549         if(!p->pipe_bound) {
550                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
551                 set_incoming_fault(p);
552                 return False;
553         }
554
555         /*
556          * Check if we need to do authentication processing.
557          * This is only done on requests, not binds.
558          */
559
560         /*
561          * Read the RPC request header.
562          */
563
564         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
565                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
566                 set_incoming_fault(p);
567                 return False;
568         }
569
570         switch(p->auth.auth_type) {
571                 case PIPE_AUTH_TYPE_NONE:
572                         break;
573
574                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
575                 case PIPE_AUTH_TYPE_NTLMSSP:
576                 {
577                         NTSTATUS status;
578                         if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
579                                 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
580                                 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
581                                 set_incoming_fault(p);
582                                 return False;
583                         }
584                         break;
585                 }
586
587                 case PIPE_AUTH_TYPE_SCHANNEL:
588                         if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
589                                 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
590                                 set_incoming_fault(p);
591                                 return False;
592                         }
593                         break;
594
595                 default:
596                         DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
597                         set_incoming_fault(p);
598                         return False;
599         }
600
601         /* Now we've done the sign/seal we can remove any padding data. */
602         if (data_len > ss_padding_len) {
603                 data_len -= ss_padding_len;
604         }
605
606         /*
607          * Check the data length doesn't go over the 15Mb limit.
608          * increased after observing a bug in the Windows NT 4.0 SP6a
609          * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
610          * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
611          */
612         
613         if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
614                 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
615                                 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
616                 set_incoming_fault(p);
617                 return False;
618         }
619
620         /*
621          * Append the data portion into the buffer and return.
622          */
623
624         if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
625                 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
626                                 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
627                 set_incoming_fault(p);
628                 return False;
629         }
630
631         if(p->hdr.flags & RPC_FLG_LAST) {
632                 BOOL ret = False;
633                 /*
634                  * Ok - we finally have a complete RPC stream.
635                  * Call the rpc command to process it.
636                  */
637
638                 /*
639                  * Ensure the internal prs buffer size is *exactly* the same
640                  * size as the current offset.
641                  */
642
643                 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
644                         DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
645                         set_incoming_fault(p);
646                         return False;
647                 }
648
649                 /*
650                  * Set the parse offset to the start of the data and set the
651                  * prs_struct to UNMARSHALL.
652                  */
653
654                 prs_set_offset(&p->in_data.data, 0);
655                 prs_switch_type(&p->in_data.data, UNMARSHALL);
656
657                 /*
658                  * Process the complete data stream here.
659                  */
660
661                 free_pipe_context(p);
662
663                 if(pipe_init_outgoing_data(p)) {
664                         ret = api_pipe_request(p);
665                 }
666
667                 free_pipe_context(p);
668
669                 /*
670                  * We have consumed the whole data stream. Set back to
671                  * marshalling and set the offset back to the start of
672                  * the buffer to re-use it (we could also do a prs_mem_free()
673                  * and then re_init on the next start of PDU. Not sure which
674                  * is best here.... JRA.
675                  */
676
677                 prs_switch_type(&p->in_data.data, MARSHALL);
678                 prs_set_offset(&p->in_data.data, 0);
679                 return ret;
680         }
681
682         return True;
683 }
684
685 /****************************************************************************
686  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
687  already been parsed and stored in p->hdr.
688 ****************************************************************************/
689
690 static void process_complete_pdu(pipes_struct *p)
691 {
692         prs_struct rpc_in;
693         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
694         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
695         BOOL reply = False;
696
697         if(p->fault_state) {
698                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
699                         p->name ));
700                 set_incoming_fault(p);
701                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
702                 return;
703         }
704
705         prs_init( &rpc_in, 0, p->mem_ctx, UNMARSHALL);
706
707         /*
708          * Ensure we're using the corrent endianness for both the 
709          * RPC header flags and the raw data we will be reading from.
710          */
711
712         prs_set_endian_data( &rpc_in, p->endian);
713         prs_set_endian_data( &p->in_data.data, p->endian);
714
715         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
716
717         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
718                         (unsigned int)p->hdr.pkt_type ));
719
720         switch (p->hdr.pkt_type) {
721                 case RPC_REQUEST:
722                         reply = process_request_pdu(p, &rpc_in);
723                         break;
724
725                 case RPC_PING: /* CL request - ignore... */
726                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
727                                 (unsigned int)p->hdr.pkt_type, p->name));
728                         break;
729
730                 case RPC_RESPONSE: /* No responses here. */
731                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
732                                 p->name ));
733                         break;
734
735                 case RPC_FAULT:
736                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
737                 case RPC_NOCALL: /* CL - server reply to a ping call. */
738                 case RPC_REJECT:
739                 case RPC_ACK:
740                 case RPC_CL_CANCEL:
741                 case RPC_FACK:
742                 case RPC_CANCEL_ACK:
743                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
744                                 (unsigned int)p->hdr.pkt_type, p->name));
745                         break;
746
747                 case RPC_BIND:
748                         /*
749                          * We assume that a pipe bind is only in one pdu.
750                          */
751                         if(pipe_init_outgoing_data(p)) {
752                                 reply = api_pipe_bind_req(p, &rpc_in);
753                         }
754                         break;
755
756                 case RPC_BINDACK:
757                 case RPC_BINDNACK:
758                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
759                                 (unsigned int)p->hdr.pkt_type, p->name));
760                         break;
761
762
763                 case RPC_ALTCONT:
764                         /*
765                          * We assume that a pipe bind is only in one pdu.
766                          */
767                         if(pipe_init_outgoing_data(p)) {
768                                 reply = api_pipe_alter_context(p, &rpc_in);
769                         }
770                         break;
771
772                 case RPC_ALTCONTRESP:
773                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
774                                 p->name));
775                         break;
776
777                 case RPC_AUTH3:
778                         /*
779                          * The third packet in an NTLMSSP auth exchange.
780                          */
781                         if(pipe_init_outgoing_data(p)) {
782                                 reply = api_pipe_bind_auth3(p, &rpc_in);
783                         }
784                         break;
785
786                 case RPC_SHUTDOWN:
787                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
788                                 p->name));
789                         break;
790
791                 case RPC_CO_CANCEL:
792                         /* For now just free all client data and continue processing. */
793                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
794                         /* As we never do asynchronous RPC serving, we can never cancel a
795                            call (as far as I know). If we ever did we'd have to send a cancel_ack
796                            reply. For now, just free all client data and continue processing. */
797                         reply = True;
798                         break;
799 #if 0
800                         /* Enable this if we're doing async rpc. */
801                         /* We must check the call-id matches the outstanding callid. */
802                         if(pipe_init_outgoing_data(p)) {
803                                 /* Send a cancel_ack PDU reply. */
804                                 /* We should probably check the auth-verifier here. */
805                                 reply = setup_cancel_ack_reply(p, &rpc_in);
806                         }
807                         break;
808 #endif
809
810                 case RPC_ORPHANED:
811                         /* We should probably check the auth-verifier here.
812                            For now just free all client data and continue processing. */
813                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
814                         reply = True;
815                         break;
816
817                 default:
818                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
819                         break;
820         }
821
822         /* Reset to little endian. Probably don't need this but it won't hurt. */
823         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
824
825         if (!reply) {
826                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
827                 set_incoming_fault(p);
828                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
829                 prs_mem_free(&rpc_in);
830         } else {
831                 /*
832                  * Reset the lengths. We're ready for a new pdu.
833                  */
834                 p->in_data.pdu_needed_len = 0;
835                 p->in_data.pdu_received_len = 0;
836         }
837
838         prs_mem_free(&rpc_in);
839 }
840
841 /****************************************************************************
842  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
843 ****************************************************************************/
844
845 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
846 {
847         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
848
849         DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
850                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
851                 (unsigned int)n ));
852
853         if(data_to_copy == 0) {
854                 /*
855                  * This is an error - data is being received and there is no
856                  * space in the PDU. Free the received data and go into the fault state.
857                  */
858                 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
859 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
860                 set_incoming_fault(p);
861                 return -1;
862         }
863
864         /*
865          * If we have no data already, wait until we get at least a RPC_HEADER_LEN
866          * number of bytes before we can do anything.
867          */
868
869         if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
870                 /*
871                  * Always return here. If we have more data then the RPC_HEADER
872                  * will be processed the next time around the loop.
873                  */
874                 return fill_rpc_header(p, data, data_to_copy);
875         }
876
877         /*
878          * At this point we know we have at least an RPC_HEADER_LEN amount of data
879          * stored in current_in_pdu.
880          */
881
882         /*
883          * If pdu_needed_len is zero this is a new pdu. 
884          * Unmarshall the header so we know how much more
885          * data we need, then loop again.
886          */
887
888         if(p->in_data.pdu_needed_len == 0) {
889                 ssize_t rret = unmarshall_rpc_header(p);
890                 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
891                         return rret;
892                 }
893                 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
894                    of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
895                    pdu type. Deal with this in process_complete_pdu(). */
896         }
897
898         /*
899          * Ok - at this point we have a valid RPC_HEADER in p->hdr.
900          * Keep reading until we have a full pdu.
901          */
902
903         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
904
905         /*
906          * Copy as much of the data as we need into the current_in_pdu buffer.
907          * pdu_needed_len becomes zero when we have a complete pdu.
908          */
909
910         memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
911         p->in_data.pdu_received_len += data_to_copy;
912         p->in_data.pdu_needed_len -= data_to_copy;
913
914         /*
915          * Do we have a complete PDU ?
916          * (return the number of bytes handled in the call)
917          */
918
919         if(p->in_data.pdu_needed_len == 0) {
920                 process_complete_pdu(p);
921                 return data_to_copy;
922         }
923
924         DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
925                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
926
927         return (ssize_t)data_to_copy;
928 }
929
930 /****************************************************************************
931  Accepts incoming data on an rpc pipe.
932 ****************************************************************************/
933
934 ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
935 {
936         DEBUG(6,("write_to_pipe: %x", p->pnum));
937
938         DEBUG(6,(" name: %s open: %s len: %d\n",
939                  p->name, BOOLSTR(p->open), (int)n));
940
941         dump_data(50, (uint8 *)data, n);
942
943         return p->namedpipe_write(p->np_state, data, n);
944 }
945
946 /****************************************************************************
947  Accepts incoming data on an internal rpc pipe.
948 ****************************************************************************/
949
950 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n)
951 {
952         pipes_struct *p = (pipes_struct*)np_conn;
953         size_t data_left = n;
954
955         while(data_left) {
956                 ssize_t data_used;
957
958                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
959
960                 data_used = process_incoming_data(p, data, data_left);
961
962                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
963
964                 if(data_used < 0) {
965                         return -1;
966                 }
967
968                 data_left -= data_used;
969                 data += data_used;
970         }       
971
972         return n;
973 }
974
975 /****************************************************************************
976  Replies to a request to read data from a pipe.
977
978  Headers are interspersed with the data at PDU intervals. By the time
979  this function is called, the start of the data could possibly have been
980  read by an SMBtrans (file_offset != 0).
981
982  Calling create_rpc_reply() here is a hack. The data should already
983  have been prepared into arrays of headers + data stream sections.
984 ****************************************************************************/
985
986 ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
987                 BOOL *is_data_outstanding)
988 {
989         if (!p || !p->open) {
990                 DEBUG(0,("read_from_pipe: pipe not open\n"));
991                 return -1;              
992         }
993
994         DEBUG(6,("read_from_pipe: %x", p->pnum));
995
996         return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
997 }
998
999 /****************************************************************************
1000  Replies to a request to read data from a pipe.
1001
1002  Headers are interspersed with the data at PDU intervals. By the time
1003  this function is called, the start of the data could possibly have been
1004  read by an SMBtrans (file_offset != 0).
1005
1006  Calling create_rpc_reply() here is a hack. The data should already
1007  have been prepared into arrays of headers + data stream sections.
1008 ****************************************************************************/
1009
1010 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
1011                 BOOL *is_data_outstanding)
1012 {
1013         pipes_struct *p = (pipes_struct*)np_conn;
1014         uint32 pdu_remaining = 0;
1015         ssize_t data_returned = 0;
1016
1017         if (!p) {
1018                 DEBUG(0,("read_from_pipe: pipe not open\n"));
1019                 return -1;              
1020         }
1021
1022         DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
1023
1024         /*
1025          * We cannot return more than one PDU length per
1026          * read request.
1027          */
1028
1029         /*
1030          * This condition should result in the connection being closed.  
1031          * Netapp filers seem to set it to 0xffff which results in domain
1032          * authentications failing.  Just ignore it so things work.
1033          */
1034
1035         if(n > RPC_MAX_PDU_FRAG_LEN) {
1036                 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
1037 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
1038                 n = RPC_MAX_PDU_FRAG_LEN;
1039         }
1040
1041         /*
1042          * Determine if there is still data to send in the
1043          * pipe PDU buffer. Always send this first. Never
1044          * send more than is left in the current PDU. The
1045          * client should send a new read request for a new
1046          * PDU.
1047          */
1048
1049         if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
1050                 data_returned = (ssize_t)MIN(n, pdu_remaining);
1051
1052                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
1053 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
1054                         (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
1055
1056                 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
1057                 p->out_data.current_pdu_sent += (uint32)data_returned;
1058                 goto out;
1059         }
1060
1061         /*
1062          * At this point p->current_pdu_len == p->current_pdu_sent (which
1063          * may of course be zero if this is the first return fragment.
1064          */
1065
1066         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
1067 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
1068                 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
1069
1070         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
1071                 /*
1072                  * We have sent all possible data, return 0.
1073                  */
1074                 data_returned = 0;
1075                 goto out;
1076         }
1077
1078         /*
1079          * We need to create a new PDU from the data left in p->rdata.
1080          * Create the header/data/footers. This also sets up the fields
1081          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
1082          * and stores the outgoing PDU in p->current_pdu.
1083          */
1084
1085         if(!create_next_pdu(p)) {
1086                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
1087                 return -1;
1088         }
1089
1090         data_returned = MIN(n, p->out_data.current_pdu_len);
1091
1092         memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
1093         p->out_data.current_pdu_sent += (uint32)data_returned;
1094
1095   out:
1096
1097         (*is_data_outstanding) = p->out_data.current_pdu_len > n;
1098         return data_returned;
1099 }
1100
1101 /****************************************************************************
1102  Wait device state on a pipe. Exactly what this is for is unknown...
1103 ****************************************************************************/
1104
1105 BOOL wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
1106 {
1107         if (p == NULL) {
1108                 return False;
1109         }
1110
1111         if (p->open) {
1112                 DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
1113                          priority, p->name));
1114
1115                 p->priority = priority;
1116                 
1117                 return True;
1118         } 
1119
1120         DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
1121                  priority, p->name));
1122         return False;
1123 }
1124
1125
1126 /****************************************************************************
1127  Set device state on a pipe. Exactly what this is for is unknown...
1128 ****************************************************************************/
1129
1130 BOOL set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
1131 {
1132         if (p == NULL) {
1133                 return False;
1134         }
1135
1136         if (p->open) {
1137                 DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
1138                          device_state, p->name));
1139
1140                 p->device_state = device_state;
1141                 
1142                 return True;
1143         } 
1144
1145         DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
1146                  device_state, p->name));
1147         return False;
1148 }
1149
1150
/****************************************************************************
 Close an rpc pipe.

 Tears down the transport state, releases the pipe's slot in the pnum
 bitmap, unlinks it from the Pipes list and the open-pipe database, then
 frees the structure. Returns False only if p is NULL.
****************************************************************************/

BOOL close_rpc_pipe_hnd(smb_np_struct *p)
{
	if (!p) {
		DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
		return False;
	}

	/* Let the transport behind this pipe clean up its own state first. */
	p->namedpipe_close(p->np_state);

	/* Free the pnum slot for reuse; pnum was allocated relative to
	   pipe_handle_offset. */
	bitmap_clear(bmap, p->pnum - pipe_handle_offset);

	pipes_open--;

	DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n", 
		 p->name, p->pnum, pipes_open));  

	DLIST_REMOVE(Pipes, p);
	
	/* Remove the pipe from the open-pipe database. Failure is logged
	   but deliberately non-fatal. */
	
	if ( !delete_pipe_opendb( p ) ) {
		DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
			"pipe from open db.\n", p->name));
	}

	/* Scrub the struct before freeing so stale references fault early. */
	ZERO_STRUCTP(p);

	SAFE_FREE(p);

	return True;
}
1186
1187 /****************************************************************************
1188  Close all pipes on a connection.
1189 ****************************************************************************/
1190
1191 void pipe_close_conn(connection_struct *conn)
1192 {
1193         smb_np_struct *p, *next;
1194
1195         for (p=Pipes;p;p=next) {
1196                 next = p->next;
1197                 if (p->conn == conn) {
1198                         close_rpc_pipe_hnd(p);
1199                 }
1200         }
1201 }
1202
/****************************************************************************
 Close an rpc pipe.

 Frees all state belonging to an internal (in-process) rpc pipe:
 marshalling buffers, auth backend data, talloc contexts, bound interface
 contexts, the policy-handle database and the impersonated user's
 credentials. Returns False only if np_conn is NULL.
****************************************************************************/

static BOOL close_internal_rpc_pipe_hnd(void *np_conn)
{
	pipes_struct *p = (pipes_struct *)np_conn;
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	/* Release the outgoing reply and incoming request parse buffers. */
	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	/* Let the negotiated auth mechanism free its private data, if it
	   registered a destructor. */
	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	if (p->mem_ctx) {
		talloc_destroy(p->mem_ctx);
	}

	if (p->pipe_state_mem_ctx) {
		talloc_destroy(p->pipe_state_mem_ctx);
	}

	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	/* Drop the impersonated user's token, session key and group list. */
	TALLOC_FREE(p->pipe_user.nt_user_token);
	data_blob_free(&p->session_key);
	SAFE_FREE(p->pipe_user.ut.groups);

	DLIST_REMOVE(InternalPipes, p);

	/* Scrub the struct before freeing so stale references fault early. */
	ZERO_STRUCTP(p);

	SAFE_FREE(p);
	
	return True;
}
1247
1248 /****************************************************************************
1249  Find an rpc pipe given a pipe handle in a buffer and an offset.
1250 ****************************************************************************/
1251
1252 smb_np_struct *get_rpc_pipe_p(uint16 pnum)
1253 {
1254         if (chain_p) {
1255                 return chain_p;
1256         }
1257
1258         return get_rpc_pipe(pnum);
1259 }
1260
1261 /****************************************************************************
1262  Find an rpc pipe given a pipe handle.
1263 ****************************************************************************/
1264
1265 smb_np_struct *get_rpc_pipe(int pnum)
1266 {
1267         smb_np_struct *p;
1268
1269         DEBUG(4,("search for pipe pnum=%x\n", pnum));
1270
1271         for (p=Pipes;p;p=p->next) {
1272                 DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n", 
1273                           p->name, p->pnum, pipes_open));  
1274         }
1275
1276         for (p=Pipes;p;p=p->next) {
1277                 if (p->pnum == pnum) {
1278                         chain_p = p;
1279                         return p;
1280                 }
1281         }
1282
1283         return NULL;
1284 }