s3-rpc_server: Make process_incoming_data() public.
[kamenim/samba.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /*
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
26 #include "rpc_server.h"
27 #include "smbd/globals.h"
28 #include "fake_file.h"
29 #include "rpc_dce.h"
30
31 #undef DBGC_CLASS
32 #define DBGC_CLASS DBGC_RPC_SRV
33
34 /****************************************************************************
35  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
36 ****************************************************************************/
37
38 static ssize_t fill_rpc_header(struct pipes_struct *p, char *data, size_t data_to_copy)
39 {
40         size_t len_needed_to_complete_hdr =
41                 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
42
43         DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
44                    "len_needed_to_complete_hdr = %u, "
45                    "receive_len = %u\n",
46                    (unsigned int)data_to_copy,
47                    (unsigned int)len_needed_to_complete_hdr,
48                    (unsigned int)p->in_data.pdu.length ));
49
50         if (p->in_data.pdu.data == NULL) {
51                 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
52         }
53         if (p->in_data.pdu.data == NULL) {
54                 DEBUG(0, ("talloc failed\n"));
55                 return -1;
56         }
57
58         memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
59                 data, len_needed_to_complete_hdr);
60         p->in_data.pdu.length += len_needed_to_complete_hdr;
61
62         return (ssize_t)len_needed_to_complete_hdr;
63 }
64
65 static bool get_pdu_size(struct pipes_struct *p)
66 {
67         uint16_t frag_len;
68         /* the fill_rpc_header() call insures we copy only
69          * RPC_HEADER_LEN bytes. If this doesn't match then
70          * somethign is very wrong and we can only abort */
71         if (p->in_data.pdu.length != RPC_HEADER_LEN) {
72                 DEBUG(0, ("Unexpected RPC Header size! "
73                           "got %d, expected %d)\n",
74                           (int)p->in_data.pdu.length,
75                           RPC_HEADER_LEN));
76                 set_incoming_fault(p);
77                 return false;
78         }
79
80         frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
81
82         /* verify it is a reasonable value */
83         if ((frag_len < RPC_HEADER_LEN) ||
84             (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
85                 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
86                           frag_len));
87                 set_incoming_fault(p);
88                 return false;
89         }
90
91         p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
92
93         /* allocate the space needed to fill the pdu */
94         p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
95                                                 uint8_t, frag_len);
96         if (p->in_data.pdu.data == NULL) {
97                 DEBUG(0, ("talloc_realloc failed\n"));
98                 set_incoming_fault(p);
99                 return false;
100         }
101
102         return true;
103 }
104
105 /****************************************************************************
106   Call this to free any talloc'ed memory. Do this after processing
107   a complete incoming and outgoing request (multiple incoming/outgoing
108   PDU's).
109 ****************************************************************************/
110
111 static void free_pipe_context(struct pipes_struct *p)
112 {
113         data_blob_free(&p->out_data.frag);
114         data_blob_free(&p->out_data.rdata);
115         data_blob_free(&p->in_data.data);
116
117         DEBUG(3, ("free_pipe_context: "
118                 "destroying talloc pool of size %lu\n",
119                 (unsigned long)talloc_total_size(p->mem_ctx)));
120         talloc_free_children(p->mem_ctx);
121 }
122
123 /****************************************************************************
124  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
125 ****************************************************************************/
126
/*
 * Consume up to n bytes of incoming pipe data, assembling it into the
 * PDU buffer in p->in_data.pdu.  Returns the number of bytes consumed
 * from 'data' (possibly 0 while the PDU size is being determined), or
 * -1 after putting the pipe into the fault state.  Callers loop until
 * all their data is consumed (see write_to_internal_pipe()).
 */
ssize_t process_incoming_data(struct pipes_struct *p, char *data, size_t n)
{
	/* Never accept more than would fit in a maximum-size PDU. */
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
					- p->in_data.pdu.length);

	DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
		   "pdu_needed_len = %u, incoming data = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len,
		   (unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the
		 * fault state.
		 */
		DEBUG(0, ("process_incoming_data: "
			  "No space in incoming pdu buffer. "
			  "Current size = %u incoming data size = %u\n",
			  (unsigned int)p->in_data.pdu.length,
			  (unsigned int)n));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least
	 * a RPC_HEADER_LEN * number of bytes before we can do anything.
	 */

	if ((p->in_data.pdu_needed_len == 0) &&
	    (p->in_data.pdu.length < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of
	 * data stored in p->in_data.pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Check how much more data we need, then loop again.
	 */
	if (p->in_data.pdu_needed_len == 0) {

		/* Parses frag_length out of the header and resizes pdu. */
		bool ok = get_pdu_size(p);
		if (!ok) {
			return -1;
		}
		if (p->in_data.pdu_needed_len > 0) {
			/* Header consumed; body arrives on later calls. */
			return 0;
		}

		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU
		 * that consists of an RPC_HEADER only. This is a
		 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
		 * DCERPC_PKT_ORPHANED pdu type.
		 * Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the p->in_data.pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
		data, data_to_copy);
	p->in_data.pdu.length += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
		   "pdu.length = %u, pdu_needed_len = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len));

	return (ssize_t)data_to_copy;
}
227
228 /****************************************************************************
229  Accepts incoming data on an internal rpc pipe.
230 ****************************************************************************/
231
static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
	size_t remaining;

	/* Feed the payload to the PDU assembler until it is all consumed. */
	for (remaining = n; remaining != 0; ) {
		ssize_t consumed;

		DEBUG(10, ("write_to_pipe: data_left = %u\n",
			  (unsigned int)remaining));

		consumed = process_incoming_data(p, data, remaining);

		DEBUG(10, ("write_to_pipe: data_used = %d\n",
			   (int)consumed));

		if (consumed < 0) {
			return -1;
		}

		remaining -= consumed;
		data += consumed;
	}

	return n;
}
257
258 /****************************************************************************
259  Replies to a request to read data from a pipe.
260
261  Headers are interspersed with the data at PDU intervals. By the time
262  this function is called, the start of the data could possibly have been
263  read by an SMBtrans (file_offset != 0).
264
265  Calling create_rpc_reply() here is a hack. The data should already
266  have been prepared into arrays of headers + data stream sections.
267 ****************************************************************************/
268
/*
 * Copy up to n bytes of reply data into 'data'.  Sends any remainder of
 * the current outgoing PDU fragment first; otherwise builds the next
 * fragment from p->out_data.rdata via create_next_pdu().  Returns the
 * number of bytes copied (0 when all reply data has been sent) or -1 on
 * error.  *is_data_outstanding is set when the current fragment did not
 * fit into this read.
 */
static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
				       size_t n, bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n",
		 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n,
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			 RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	pdu_remaining = p->out_data.frag.length
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			  (unsigned int)p->out_data.frag.length,
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy(data,
		       p->out_data.frag.data
		       + p->out_data.current_pdu_sent,
		       data_returned);

		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment.
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, p->out_data.rdata.length = %u.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		  (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)p->out_data.rdata.length));

	if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
		return -1;
	}

	data_returned = MIN(n, p->out_data.frag.length);

	memcpy(data, p->out_data.frag.data, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
	/* More of this fragment remains if it was bigger than the read. */
	(*is_data_outstanding) = p->out_data.frag.length > n;

	if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		data_blob_free(&p->out_data.frag);

		if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
			/*
			 * We're completely finished with both outgoing and
			 * incoming data streams. It's safe to free all
			 * temporary data from this request.
			 */
			free_pipe_context(p);
		}
	}

	return data_returned;
}
394
395 bool fsp_is_np(struct files_struct *fsp)
396 {
397         enum FAKE_FILE_TYPE type;
398
399         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
400                 return false;
401         }
402
403         type = fsp->fake_file_handle->type;
404
405         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
406                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
407 }
408
/* State for a named pipe that is proxied to an external RPC server. */
struct np_proxy_state {
	uint16_t file_type;		/* filled in by tstream_npa_connect_recv() */
	uint16_t device_state;		/* filled in by tstream_npa_connect_recv() */
	uint64_t allocation_size;	/* filled in by tstream_npa_connect_recv() */
	struct tstream_context *npipe;	/* stream to the external pipe server */
	struct tevent_queue *read_queue;	/* serializes reads on npipe */
	struct tevent_queue *write_queue;	/* serializes writes on npipe */
};
417
418 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
419                                 const char *pipe_name,
420                                 const struct tsocket_address *local_address,
421                                 const struct tsocket_address *remote_address,
422                                 struct auth_serversupplied_info *server_info)
423 {
424         struct np_proxy_state *result;
425         char *socket_np_dir;
426         const char *socket_dir;
427         struct tevent_context *ev;
428         struct tevent_req *subreq;
429         struct netr_SamInfo3 *info3;
430         NTSTATUS status;
431         bool ok;
432         int ret;
433         int sys_errno;
434
435         result = talloc(mem_ctx, struct np_proxy_state);
436         if (result == NULL) {
437                 DEBUG(0, ("talloc failed\n"));
438                 return NULL;
439         }
440
441         result->read_queue = tevent_queue_create(result, "np_read");
442         if (result->read_queue == NULL) {
443                 DEBUG(0, ("tevent_queue_create failed\n"));
444                 goto fail;
445         }
446
447         result->write_queue = tevent_queue_create(result, "np_write");
448         if (result->write_queue == NULL) {
449                 DEBUG(0, ("tevent_queue_create failed\n"));
450                 goto fail;
451         }
452
453         ev = s3_tevent_context_init(talloc_tos());
454         if (ev == NULL) {
455                 DEBUG(0, ("s3_tevent_context_init failed\n"));
456                 goto fail;
457         }
458
459         socket_dir = lp_parm_const_string(
460                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
461                 get_dyn_NCALRPCDIR());
462         if (socket_dir == NULL) {
463                 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
464                 goto fail;
465         }
466         socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
467         if (socket_np_dir == NULL) {
468                 DEBUG(0, ("talloc_asprintf failed\n"));
469                 goto fail;
470         }
471
472         info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
473         if (info3 == NULL) {
474                 DEBUG(0, ("talloc failed\n"));
475                 goto fail;
476         }
477
478         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
479         if (!NT_STATUS_IS_OK(status)) {
480                 TALLOC_FREE(info3);
481                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
482                           nt_errstr(status)));
483                 goto fail;
484         }
485
486         become_root();
487         subreq = tstream_npa_connect_send(talloc_tos(), ev,
488                                           socket_np_dir,
489                                           pipe_name,
490                                           remote_address, /* client_addr */
491                                           NULL, /* client_name */
492                                           local_address, /* server_addr */
493                                           NULL, /* server_name */
494                                           info3,
495                                           server_info->user_session_key,
496                                           data_blob_null /* delegated_creds */);
497         if (subreq == NULL) {
498                 unbecome_root();
499                 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
500                           "user %s\\%s failed\n",
501                           socket_np_dir, pipe_name, info3->base.domain.string,
502                           info3->base.account_name.string));
503                 goto fail;
504         }
505         ok = tevent_req_poll(subreq, ev);
506         unbecome_root();
507         if (!ok) {
508                 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
509                           "failed for tstream_npa_connect: %s\n",
510                           socket_np_dir, pipe_name, info3->base.domain.string,
511                           info3->base.account_name.string,
512                           strerror(errno)));
513                 goto fail;
514
515         }
516         ret = tstream_npa_connect_recv(subreq, &sys_errno,
517                                        result,
518                                        &result->npipe,
519                                        &result->file_type,
520                                        &result->device_state,
521                                        &result->allocation_size);
522         TALLOC_FREE(subreq);
523         if (ret != 0) {
524                 DEBUG(0, ("tstream_npa_connect_recv  to %s for pipe %s and "
525                           "user %s\\%s failed: %s\n",
526                           socket_np_dir, pipe_name, info3->base.domain.string,
527                           info3->base.account_name.string,
528                           strerror(sys_errno)));
529                 goto fail;
530         }
531
532         return result;
533
534  fail:
535         TALLOC_FREE(result);
536         return NULL;
537 }
538
539 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
540                  const struct tsocket_address *local_address,
541                  const struct tsocket_address *remote_address,
542                  struct client_address *client_id,
543                  struct auth_serversupplied_info *server_info,
544                  struct messaging_context *msg_ctx,
545                  struct fake_file_handle **phandle)
546 {
547         const char **proxy_list;
548         struct fake_file_handle *handle;
549
550         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
551
552         handle = talloc(mem_ctx, struct fake_file_handle);
553         if (handle == NULL) {
554                 return NT_STATUS_NO_MEMORY;
555         }
556
557         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
558                 struct np_proxy_state *p;
559
560                 p = make_external_rpc_pipe_p(handle, name,
561                                              local_address,
562                                              remote_address,
563                                              server_info);
564
565                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
566                 handle->private_data = p;
567         } else {
568                 struct pipes_struct *p;
569                 struct ndr_syntax_id syntax;
570
571                 if (!is_known_pipename(name, &syntax)) {
572                         TALLOC_FREE(handle);
573                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
574                 }
575
576                 p = make_internal_rpc_pipe_p(handle, &syntax, client_id,
577                                              server_info, msg_ctx);
578
579                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
580                 handle->private_data = p;
581         }
582
583         if (handle->private_data == NULL) {
584                 TALLOC_FREE(handle);
585                 return NT_STATUS_PIPE_NOT_AVAILABLE;
586         }
587
588         *phandle = handle;
589
590         return NT_STATUS_OK;
591 }
592
593 bool np_read_in_progress(struct fake_file_handle *handle)
594 {
595         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
596                 return false;
597         }
598
599         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
600                 struct np_proxy_state *p = talloc_get_type_abort(
601                         handle->private_data, struct np_proxy_state);
602                 size_t read_count;
603
604                 read_count = tevent_queue_length(p->read_queue);
605                 if (read_count > 0) {
606                         return true;
607                 }
608
609                 return false;
610         }
611
612         return false;
613 }
614
/* Async state for np_write_send()/np_write_recv(). */
struct np_write_state {
	struct event_context *ev;
	struct np_proxy_state *p;	/* proxied pipe; unused for internal pipes */
	struct iovec iov;		/* caller's data, kept alive for the writev */
	ssize_t nwritten;		/* result: bytes written on success */
};
621
622 static void np_write_done(struct tevent_req *subreq);
623
624 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
625                                  struct fake_file_handle *handle,
626                                  const uint8_t *data, size_t len)
627 {
628         struct tevent_req *req;
629         struct np_write_state *state;
630         NTSTATUS status;
631
632         DEBUG(6, ("np_write_send: len: %d\n", (int)len));
633         dump_data(50, data, len);
634
635         req = tevent_req_create(mem_ctx, &state, struct np_write_state);
636         if (req == NULL) {
637                 return NULL;
638         }
639
640         if (len == 0) {
641                 state->nwritten = 0;
642                 status = NT_STATUS_OK;
643                 goto post_status;
644         }
645
646         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
647                 struct pipes_struct *p = talloc_get_type_abort(
648                         handle->private_data, struct pipes_struct);
649
650                 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
651
652                 status = (state->nwritten >= 0)
653                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
654                 goto post_status;
655         }
656
657         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
658                 struct np_proxy_state *p = talloc_get_type_abort(
659                         handle->private_data, struct np_proxy_state);
660                 struct tevent_req *subreq;
661
662                 state->ev = ev;
663                 state->p = p;
664                 state->iov.iov_base = CONST_DISCARD(void *, data);
665                 state->iov.iov_len = len;
666
667                 subreq = tstream_writev_queue_send(state, ev,
668                                                    p->npipe,
669                                                    p->write_queue,
670                                                    &state->iov, 1);
671                 if (subreq == NULL) {
672                         goto fail;
673                 }
674                 tevent_req_set_callback(subreq, np_write_done, req);
675                 return req;
676         }
677
678         status = NT_STATUS_INVALID_HANDLE;
679  post_status:
680         if (NT_STATUS_IS_OK(status)) {
681                 tevent_req_done(req);
682         } else {
683                 tevent_req_nterror(req, status);
684         }
685         return tevent_req_post(req, ev);
686  fail:
687         TALLOC_FREE(req);
688         return NULL;
689 }
690
691 static void np_write_done(struct tevent_req *subreq)
692 {
693         struct tevent_req *req = tevent_req_callback_data(
694                 subreq, struct tevent_req);
695         struct np_write_state *state = tevent_req_data(
696                 req, struct np_write_state);
697         ssize_t received;
698         int err;
699
700         received = tstream_writev_queue_recv(subreq, &err);
701         if (received < 0) {
702                 tevent_req_nterror(req, map_nt_error_from_unix(err));
703                 return;
704         }
705         state->nwritten = received;
706         tevent_req_done(req);
707 }
708
709 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
710 {
711         struct np_write_state *state = tevent_req_data(
712                 req, struct np_write_state);
713         NTSTATUS status;
714
715         if (tevent_req_is_nterror(req, &status)) {
716                 return status;
717         }
718         *pnwritten = state->nwritten;
719         return NT_STATUS_OK;
720 }
721
/* Iteration state for np_ipc_readv_next_vector(). */
struct np_ipc_readv_next_vector_state {
	uint8_t *buf;		/* caller's destination buffer */
	size_t len;		/* total bytes wanted (capped at UINT16_MAX) */
	off_t ofs;		/* bytes handed out so far */
	size_t remaining;	/* pending stream bytes beyond this request */
};
728
729 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
730                                           uint8_t *buf, size_t len)
731 {
732         ZERO_STRUCTP(s);
733
734         s->buf = buf;
735         s->len = MIN(len, UINT16_MAX);
736 }
737
/*
 * "next vector" callback for tstream_readv_pdu on a proxied pipe read.
 * Hands out at most one iovec per invocation, advancing state->ofs as
 * data arrives, and signals completion by returning a NULL vector.  A
 * short read is returned as soon as the stream has no pending bytes
 * after at least one byte was received.  Returns 0 on success, -1 on
 * stream error.
 */
static int np_ipc_readv_next_vector(struct tstream_context *stream,
				    void *private_data,
				    TALLOC_CTX *mem_ctx,
				    struct iovec **_vector,
				    size_t *count)
{
	struct np_ipc_readv_next_vector_state *state =
		(struct np_ipc_readv_next_vector_state *)private_data;
	struct iovec *vector;
	ssize_t pending;
	size_t wanted;

	/* Request fully satisfied: signal end of the readv. */
	if (state->ofs == state->len) {
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	pending = tstream_pending_bytes(stream);
	if (pending == -1) {
		return -1;
	}

	if (pending == 0 && state->ofs != 0) {
		/* return a short read */
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	if (pending == 0) {
		/* we want at least one byte and recheck again */
		wanted = 1;
	} else {
		size_t missing = state->len - state->ofs;
		if (pending > missing) {
			/* there's more available */
			state->remaining = pending - missing;
			wanted = missing;
		} else {
			/* read what we can get and recheck in the next cycle */
			wanted = pending;
		}
	}

	vector = talloc_array(mem_ctx, struct iovec, 1);
	if (!vector) {
		return -1;
	}

	vector[0].iov_base = state->buf + state->ofs;
	vector[0].iov_len = wanted;

	state->ofs += wanted;

	*_vector = vector;
	*count = 1;
	return 0;
}
797
/* Async state for np_read_send()/np_read_recv(). */
struct np_read_state {
	struct np_proxy_state *p;	/* proxied pipe; unused for internal pipes */
	struct np_ipc_readv_next_vector_state next_vector;

	/* NOTE(review): nread is unsigned, so a negative result from the
	 * internal-pipe read path cannot be stored here as -1; errors must
	 * be reported through the request's NTSTATUS instead. */
	size_t nread;
	bool is_data_outstanding;	/* more reply data waiting after this read */
};
805
806 static void np_read_done(struct tevent_req *subreq);
807
808 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
809                                 struct fake_file_handle *handle,
810                                 uint8_t *data, size_t len)
811 {
812         struct tevent_req *req;
813         struct np_read_state *state;
814         NTSTATUS status;
815
816         req = tevent_req_create(mem_ctx, &state, struct np_read_state);
817         if (req == NULL) {
818                 return NULL;
819         }
820
821         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
822                 struct pipes_struct *p = talloc_get_type_abort(
823                         handle->private_data, struct pipes_struct);
824
825                 state->nread = read_from_internal_pipe(
826                         p, (char *)data, len, &state->is_data_outstanding);
827
828                 status = (state->nread >= 0)
829                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
830                 goto post_status;
831         }
832
833         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
834                 struct np_proxy_state *p = talloc_get_type_abort(
835                         handle->private_data, struct np_proxy_state);
836                 struct tevent_req *subreq;
837
838                 np_ipc_readv_next_vector_init(&state->next_vector,
839                                               data, len);
840
841                 subreq = tstream_readv_pdu_queue_send(state,
842                                                       ev,
843                                                       p->npipe,
844                                                       p->read_queue,
845                                                       np_ipc_readv_next_vector,
846                                                       &state->next_vector);
847                 if (subreq == NULL) {
848                         status = NT_STATUS_NO_MEMORY;
849                         goto post_status;
850                 }
851                 tevent_req_set_callback(subreq, np_read_done, req);
852                 return req;
853         }
854
855         status = NT_STATUS_INVALID_HANDLE;
856  post_status:
857         if (NT_STATUS_IS_OK(status)) {
858                 tevent_req_done(req);
859         } else {
860                 tevent_req_nterror(req, status);
861         }
862         return tevent_req_post(req, ev);
863 }
864
865 static void np_read_done(struct tevent_req *subreq)
866 {
867         struct tevent_req *req = tevent_req_callback_data(
868                 subreq, struct tevent_req);
869         struct np_read_state *state = tevent_req_data(
870                 req, struct np_read_state);
871         ssize_t ret;
872         int err;
873
874         ret = tstream_readv_pdu_queue_recv(subreq, &err);
875         TALLOC_FREE(subreq);
876         if (ret == -1) {
877                 tevent_req_nterror(req, map_nt_error_from_unix(err));
878                 return;
879         }
880
881         state->nread = ret;
882         state->is_data_outstanding = (state->next_vector.remaining > 0);
883
884         tevent_req_done(req);
885         return;
886 }
887
888 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
889                       bool *is_data_outstanding)
890 {
891         struct np_read_state *state = tevent_req_data(
892                 req, struct np_read_state);
893         NTSTATUS status;
894
895         if (tevent_req_is_nterror(req, &status)) {
896                 return status;
897         }
898         *nread = state->nread;
899         *is_data_outstanding = state->is_data_outstanding;
900         return NT_STATUS_OK;
901 }
902
903 /**
904  * @brief Create a new RPC client context which uses a local dispatch function.
905  *
906  * @param[in]  conn  The connection struct that will hold the pipe
907  *
908  * @param[out] spoolss_pipe  A pointer to the connected rpc client pipe.
909  *
910  * @return              NT_STATUS_OK on success, a corresponding NT status if an
911  *                      error occured.
912  */
913 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
914                                   struct rpc_pipe_client **spoolss_pipe)
915 {
916         NTSTATUS status;
917
918         /* TODO: check and handle disconnections */
919
920         if (!conn->spoolss_pipe) {
921                 status = rpc_pipe_open_internal(conn,
922                                                 &ndr_table_spoolss.syntax_id,
923                                                 conn->server_info,
924                                                 &conn->sconn->client_id,
925                                                 conn->sconn->msg_ctx,
926                                                 &conn->spoolss_pipe);
927                 if (!NT_STATUS_IS_OK(status)) {
928                         return status;
929                 }
930         }
931
932         *spoolss_pipe = conn->spoolss_pipe;
933         return NT_STATUS_OK;
934 }