2 Unix SMB/CIFS implementation.
4 Copyright (C) Stefan Metzmacher 2010
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "system/network.h"
22 #include "libsmb/libsmb.h"
23 #include "../lib/util/tevent_ntstatus.h"
24 #include "../lib/tsocket/tsocket.h"
25 #include "../lib/tsocket/tsocket_internal.h"
26 #include "cli_np_tstream.h"
/* ops vtable is defined near the bottom of this file. */
28 static const struct tstream_context_ops tstream_cli_np_ops;
/*
 * Rationale for the fixed fragment buffer size used below.
 */
31 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
32 * This fits into the max_xmit negotiated at the SMB layer.
34 * On the sending side they may use SMBtranss if the request does not
35 * fit into a single SMBtrans call.
37 * Windows uses 1024 as max data size of a SMBtrans request and then
38 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
41 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
42 * request to get the whole fragment at once (like samba 3.5.x and below did).
44 * It is important that we do SMBwriteX with the size of a full fragment,
45 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
46 * from NT4 servers. (See bug #8195)
48 #define TSTREAM_CLI_NP_BUF_SIZE 4280
/*
 * Per-stream state for a named pipe carried over a cli_state SMB
 * connection.
 *
 * NOTE(review): this chunk looks lossy — members referenced elsewhere
 * in the file (npipe, fnum, trans.active, trans.setup, and the
 * read/write ofs/left counters) are not visible here; confirm against
 * the full struct definition.
 */
50 struct tstream_cli_np {
/* Underlying SMB connection; not owned by this struct (see destructor). */
51 	struct cli_state *cli;
/* Timeout restored by tstream_cli_np_set_timeout() when disconnected. */
54 	unsigned int default_timeout;
/* Pending coupled read/write requests used for SMBtrans coupling. */
58 	struct tevent_req *read_req;
59 	struct tevent_req *write_req;
/* One full DCERPC fragment worth of staging space (4280 bytes). */
66 	uint8_t buf[TSTREAM_CLI_NP_BUF_SIZE];
/*
 * talloc destructor: best-effort close of the pipe handle when the
 * stream is freed while still connected.
 *
 * NOTE(review): lines are missing from this chunk (early return for the
 * disconnected case, final return value) — the visible logic is that a
 * *synchronous* cli_close() is issued here, which the TODO below already
 * flags as undesirable in a destructor.
 */
70 static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
74 	if (!cli_state_is_connected(cli_nps->cli)) {
79 	 * TODO: do not use a sync call with a destructor!!!
81 	 * This only happens, if a caller does talloc_free(),
82 	 * while everything was still ok.
84 	 * If we get an unexpected failure within a normal
85 	 * operation, we already do an async cli_close_send()/_recv().
87 	 * Once we've fixed all callers to call
88 	 * tstream_disconnect_send()/_recv(), this will
91 	status = cli_close(cli_nps->cli, cli_nps->fnum);
92 	if (!NT_STATUS_IS_OK(status)) {
93 		DEBUG(1, ("tstream_cli_np_destructor: cli_close "
94 		"failed on pipe %s. Error was %s\n",
95 		cli_nps->npipe, nt_errstr(status)));
/* Destructor cannot propagate the error; log above is all we can do. */
98 	 * We can't do much on failure
/* Async open: state carried between _send, _done and _recv. */
103 struct tstream_cli_np_open_state {
104 	struct cli_state *cli;
109 static void tstream_cli_np_open_done(struct tevent_req *subreq);
/*
 * Start opening the named pipe @npipe on @cli via NTCreate.
 * Returns a tevent_req; completion is reported through
 * _tstream_cli_np_open_recv().
 * NOTE(review): several argument lines of cli_ntcreate_send() are not
 * visible in this chunk (desired access, create disposition, etc.).
 */
111 struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
112 					struct tevent_context *ev,
113 					struct cli_state *cli,
116 	struct tevent_req *req;
117 	struct tstream_cli_np_open_state *state;
118 	struct tevent_req *subreq;
120 	req = tevent_req_create(mem_ctx, &state,
121 				struct tstream_cli_np_open_state);
/* Keep our own copy of the pipe name; it outlives the caller's. */
127 	state->npipe = talloc_strdup(state, npipe);
128 	if (tevent_req_nomem(state->npipe, req)) {
129 		return tevent_req_post(req, ev);
132 	subreq = cli_ntcreate_send(state, ev, cli,
137 				FILE_SHARE_READ|FILE_SHARE_WRITE,
141 	if (tevent_req_nomem(subreq, req)) {
142 		return tevent_req_post(req, ev);
144 	tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);
/* NTCreate completed: collect the fnum or propagate the NT error. */
149 static void tstream_cli_np_open_done(struct tevent_req *subreq)
151 	struct tevent_req *req =
152 		tevent_req_callback_data(subreq, struct tevent_req);
153 	struct tstream_cli_np_open_state *state =
154 		tevent_req_data(req, struct tstream_cli_np_open_state);
157 	status = cli_ntcreate_recv(subreq, &state->fnum);
159 	if (!NT_STATUS_IS_OK(status)) {
160 		tevent_req_nterror(req, status);
164 	tevent_req_done(req);
/*
 * Finish the open: build the tstream_context, move ownership of the
 * state into it and arm the destructor.  On any error the request is
 * marked received so it can be freed.
 */
167 NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
169 				struct tstream_context **_stream,
170 				const char *location)
172 	struct tstream_cli_np_open_state *state =
173 		tevent_req_data(req, struct tstream_cli_np_open_state);
174 	struct tstream_context *stream;
175 	struct tstream_cli_np *cli_nps;
178 	if (tevent_req_is_nterror(req, &status)) {
179 		tevent_req_received(req);
183 	stream = tstream_context_create(mem_ctx,
186 				struct tstream_cli_np,
189 		tevent_req_received(req);
190 		return NT_STATUS_NO_MEMORY;
192 	ZERO_STRUCTP(cli_nps);
194 	cli_nps->cli = state->cli;
/* talloc_move: pipe name ownership transfers from state to cli_nps. */
195 	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
196 	cli_nps->fnum = state->fnum;
197 	cli_nps->default_timeout = state->cli->timeout;
199 	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);
201 	cli_nps->trans.active = false;
202 	cli_nps->trans.read_req = NULL;
203 	cli_nps->trans.write_req = NULL;
/* Preformat the SMBtrans setup words: DCERPC command + pipe fnum. */
204 	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
205 	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);
208 	tevent_req_received(req);
/*
 * tstream ops: number of bytes already buffered from the last pipe
 * read.  NOTE(review): the error path for a disconnected cli (likely
 * errno = ENOTCONN; return -1) is not visible in this chunk.
 */
212 static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
214 	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
215 					struct tstream_cli_np);
217 	if (!cli_state_is_connected(cli_nps->cli)) {
222 	return cli_nps->read.left;
/* True if @stream is backed by this named-pipe implementation. */
225 bool tstream_is_cli_np(struct tstream_context *stream)
227 	struct tstream_cli_np *cli_nps =
228 		talloc_get_type(_tstream_context_data(stream),
229 			struct tstream_cli_np);
/*
 * Request that the next readv/writev pair be coupled into a single
 * SMBtrans round trip.  Refused while any trans operation is pending.
 */
238 NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
240 	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
241 					struct tstream_cli_np);
243 	if (cli_nps->trans.read_req) {
244 		return NT_STATUS_PIPE_BUSY;
247 	if (cli_nps->trans.write_req) {
248 		return NT_STATUS_PIPE_BUSY;
251 	if (cli_nps->trans.active) {
252 		return NT_STATUS_PIPE_BUSY;
255 	cli_nps->trans.active = true;
/*
 * Set the SMB request timeout; returns the previous value.  When the
 * connection is gone, just report the stored default.
 */
260 unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
261 				unsigned int timeout)
263 	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
264 					struct tstream_cli_np);
266 	if (!cli_state_is_connected(cli_nps->cli)) {
267 		return cli_nps->default_timeout;
270 	return cli_set_timeout(cli_nps->cli, timeout);
/* Accessor for the underlying cli_state (borrowed, not owned). */
273 struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
275 	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
276 					struct tstream_cli_np);
/* Async writev: state shared by the write_next/write_done steps. */
281 struct tstream_cli_np_writev_state {
282 	struct tstream_context *stream;
283 	struct tevent_context *ev;
/* Private, mutable copy of the caller's iovec (consumed as we go). */
285 	struct iovec *vector;
/* Original error (errno value + location) preserved across cleanup. */
292 	const char *location;
/* Destructor: drop any dangling trans.write_req back-pointer. */
296 static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
298 	struct tstream_cli_np *cli_nps =
299 		tstream_context_data(state->stream,
300 		struct tstream_cli_np);
302 	cli_nps->trans.write_req = NULL;
307 static void tstream_cli_np_writev_write_next(struct tevent_req *req);
/*
 * tstream ops: start writing the iovec to the pipe.  The vector is
 * copied so it can be consumed in place; actual I/O is driven by
 * tstream_cli_np_writev_write_next().
 */
309 static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
310 					struct tevent_context *ev,
311 					struct tstream_context *stream,
312 					const struct iovec *vector,
315 	struct tevent_req *req;
316 	struct tstream_cli_np_writev_state *state;
317 	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
318 					struct tstream_cli_np);
320 	req = tevent_req_create(mem_ctx, &state,
321 			struct tstream_cli_np_writev_state);
325 	state->stream = stream;
329 	talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);
331 	if (!cli_state_is_connected(cli_nps->cli)) {
332 		tevent_req_error(req, ENOTCONN);
333 		return tevent_req_post(req, ev);
337 	 * we make a copy of the vector so we can change the structure
339 	state->vector = talloc_array(state, struct iovec, count);
340 	if (tevent_req_nomem(state->vector, req)) {
341 		return tevent_req_post(req, ev);
343 	memcpy(state->vector, vector, sizeof(struct iovec) * count);
344 	state->count = count;
346 	tstream_cli_np_writev_write_next(req);
/* If write_next finished synchronously, deliver via a posted event. */
347 	if (!tevent_req_is_in_progress(req)) {
348 		return tevent_req_post(req, ev);
354 static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
355 static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);
/*
 * Fill the staging buffer with up to one full fragment
 * (TSTREAM_CLI_NP_BUF_SIZE) from the remaining iovec entries, then
 * either complete, hand off to the coupled SMBtrans read, or issue an
 * SMBwriteX in message mode.
 */
357 static void tstream_cli_np_writev_write_next(struct tevent_req *req)
359 	struct tstream_cli_np_writev_state *state =
361 		struct tstream_cli_np_writev_state);
362 	struct tstream_cli_np *cli_nps =
363 		tstream_context_data(state->stream,
364 		struct tstream_cli_np);
365 	struct tevent_req *subreq;
367 	cli_nps->write.ofs = 0;
368 	cli_nps->write.left = TSTREAM_CLI_NP_BUF_SIZE;
371 	 * copy the pending buffer first
373 	while (cli_nps->write.left > 0 && state->count > 0) {
374 		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
375 		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);
377 		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);
/* NOTE(review): the `base += len` advance is not visible in this chunk. */
380 		state->vector[0].iov_base = base;
381 		state->vector[0].iov_len -= len;
383 		cli_nps->write.ofs += len;
384 		cli_nps->write.left -= len;
386 		if (state->vector[0].iov_len == 0) {
/* Nothing buffered means the caller's vector was empty: done. */
394 	if (cli_nps->write.ofs == 0) {
395 		tevent_req_done(req);
/*
 * Trans coupling: if use_trans was requested and this is the final
 * chunk, park this write and let the trans read carry the data.
 */
399 	if (cli_nps->trans.active && state->count == 0) {
400 		cli_nps->trans.active = false;
401 		cli_nps->trans.write_req = req;
405 	if (cli_nps->trans.read_req && state->count == 0) {
406 		cli_nps->trans.write_req = req;
407 		tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
/* 8 == WRITE_RAW/message mode for named pipes (see comment below). */
411 	subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
413 				8, /* 8 means message mode. */
414 				cli_nps->write.buf, 0,
416 	if (tevent_req_nomem(subreq, req)) {
419 	tevent_req_set_callback(subreq,
420 			tstream_cli_np_writev_write_done,
/* SMBwriteX completed: require a full write, then continue the loop. */
424 static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
426 					const char *location);
428 static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
430 	struct tevent_req *req =
431 		tevent_req_callback_data(subreq, struct tevent_req);
432 	struct tstream_cli_np_writev_state *state =
433 		tevent_req_data(req, struct tstream_cli_np_writev_state);
434 	struct tstream_cli_np *cli_nps =
435 		tstream_context_data(state->stream,
436 		struct tstream_cli_np);
440 	status = cli_write_andx_recv(subreq, &written);
442 	if (!NT_STATUS_IS_OK(status)) {
443 		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
/* A short write on a message-mode pipe is a protocol error: bail out. */
447 	if (written != cli_nps->write.ofs) {
448 		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
452 	tstream_cli_np_writev_write_next(req);
455 static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);
/*
 * Fatal-error path for writev: remember the original errno/location,
 * try to close the pipe handle asynchronously, and report the
 * *original* error afterwards (never the close error).
 */
457 static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
459 					const char *location)
461 	struct tstream_cli_np_writev_state *state =
463 		struct tstream_cli_np_writev_state);
464 	struct tstream_cli_np *cli_nps =
465 		tstream_context_data(state->stream,
466 		struct tstream_cli_np);
467 	struct tevent_req *subreq;
469 	state->error.val = error;
470 	state->error.location = location;
472 	if (!cli_state_is_connected(cli_nps->cli)) {
473 		/* return the original error */
474 		_tevent_req_error(req, state->error.val, state->error.location);
478 	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
479 	if (subreq == NULL) {
480 		/* return the original error */
481 		_tevent_req_error(req, state->error.val, state->error.location);
484 	tevent_req_set_callback(subreq,
485 			tstream_cli_np_writev_disconnect_done,
/* Close finished (result deliberately ignored); surface saved error. */
489 static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
491 	struct tevent_req *req =
492 		tevent_req_callback_data(subreq, struct tevent_req);
493 	struct tstream_cli_np_writev_state *state =
494 		tevent_req_data(req, struct tstream_cli_np_writev_state);
495 	struct tstream_cli_np *cli_nps =
496 		tstream_context_data(state->stream, struct tstream_cli_np);
498 	cli_close_recv(subreq);
503 	/* return the original error */
504 	_tevent_req_error(req, state->error.val, state->error.location);
/* tstream ops: collect writev result (bytes written / errno). */
507 static int tstream_cli_np_writev_recv(struct tevent_req *req,
510 	struct tstream_cli_np_writev_state *state =
512 		struct tstream_cli_np_writev_state);
515 	ret = tsocket_simple_int_recv(req, perrno);
520 	tevent_req_received(req);
/* Async readv: state shared across read_next/trans/read_done steps. */
524 struct tstream_cli_np_readv_state {
525 	struct tstream_context *stream;
526 	struct tevent_context *ev;
/* Private, mutable copy of the caller's iovec (filled as we go). */
528 	struct iovec *vector;
/* Immediate used to re-enter the read loop after trans completion. */
534 	struct tevent_immediate *im;
/* Original error (errno value + location) preserved across cleanup. */
539 	const char *location;
/* Destructor: drop any dangling trans.read_req back-pointer. */
543 static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
545 	struct tstream_cli_np *cli_nps =
546 		tstream_context_data(state->stream,
547 		struct tstream_cli_np);
549 	cli_nps->trans.read_req = NULL;
554 static void tstream_cli_np_readv_read_next(struct tevent_req *req);
/*
 * tstream ops: start filling the caller's iovec from the pipe.
 * Mirrors writev_send: copy the vector, bail out if disconnected,
 * then drive the state machine via readv_read_next().
 */
556 static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
557 					struct tevent_context *ev,
558 					struct tstream_context *stream,
559 					struct iovec *vector,
562 	struct tevent_req *req;
563 	struct tstream_cli_np_readv_state *state;
564 	struct tstream_cli_np *cli_nps =
565 		tstream_context_data(stream, struct tstream_cli_np);
567 	req = tevent_req_create(mem_ctx, &state,
568 			struct tstream_cli_np_readv_state);
572 	state->stream = stream;
576 	talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);
578 	if (!cli_state_is_connected(cli_nps->cli)) {
579 		tevent_req_error(req, ENOTCONN);
580 		return tevent_req_post(req, ev);
584 	 * we make a copy of the vector so we can change the structure
586 	state->vector = talloc_array(state, struct iovec, count);
587 	if (tevent_req_nomem(state->vector, req)) {
588 		return tevent_req_post(req, ev);
590 	memcpy(state->vector, vector, sizeof(struct iovec) * count);
591 	state->count = count;
593 	tstream_cli_np_readv_read_next(req);
/* If read_next finished synchronously, deliver via a posted event. */
594 	if (!tevent_req_is_in_progress(req)) {
595 		return tevent_req_post(req, ev);
601 static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);
/*
 * Drain buffered pipe data into the caller's iovec first; when the
 * buffer runs dry, either complete, hand off to the coupled SMBtrans,
 * or issue a plain SMBreadX for the next fragment.
 */
603 static void tstream_cli_np_readv_read_next(struct tevent_req *req)
605 	struct tstream_cli_np_readv_state *state =
607 		struct tstream_cli_np_readv_state);
608 	struct tstream_cli_np *cli_nps =
609 		tstream_context_data(state->stream,
610 		struct tstream_cli_np);
611 	struct tevent_req *subreq;
614 	 * copy the pending buffer first
616 	while (cli_nps->read.left > 0 && state->count > 0) {
617 		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
618 		size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);
620 		memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);
/* NOTE(review): the `base += len` advance is not visible in this chunk. */
623 		state->vector[0].iov_base = base;
624 		state->vector[0].iov_len -= len;
626 		cli_nps->read.ofs += len;
627 		cli_nps->read.left -= len;
629 		if (state->vector[0].iov_len == 0) {
637 	if (state->count == 0) {
638 		tevent_req_done(req);
/*
 * Trans coupling: if use_trans was requested, park this read until the
 * paired write arrives, then run the combined SMBtrans.
 */
642 	if (cli_nps->trans.active) {
643 		cli_nps->trans.active = false;
644 		cli_nps->trans.read_req = req;
648 	if (cli_nps->trans.write_req) {
649 		cli_nps->trans.read_req = req;
650 		tstream_cli_np_readv_trans_start(req);
/* Plain path: read up to one full fragment with SMBreadX. */
654 	subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
655 				cli_nps->fnum, 0, TSTREAM_CLI_NP_BUF_SIZE);
656 	if (tevent_req_nomem(subreq, req)) {
659 	tevent_req_set_callback(subreq,
660 			tstream_cli_np_readv_read_done,
/*
 * Issue the combined SMBtrans (TRANSACT_DCERPCCMD): sends the buffered
 * write data and asks for up to a full fragment back in one round trip.
 */
664 static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);
666 static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
668 	struct tstream_cli_np_readv_state *state =
670 		struct tstream_cli_np_readv_state);
671 	struct tstream_cli_np *cli_nps =
672 		tstream_context_data(state->stream,
673 		struct tstream_cli_np);
674 	struct tevent_req *subreq;
/* Immediate is pre-allocated so the completion path cannot fail. */
676 	state->trans.im = tevent_create_immediate(state);
677 	if (tevent_req_nomem(state->trans.im, req)) {
681 	subreq = cli_trans_send(state, state->ev,
/* Two setup words: TRANSACT_DCERPCCMD and the pipe fnum (see _open_recv). */
686 			cli_nps->trans.setup, 2,
691 			TSTREAM_CLI_NP_BUF_SIZE);
692 	if (tevent_req_nomem(subreq, req)) {
695 	tevent_req_set_callback(subreq,
696 			tstream_cli_np_readv_trans_done,
700 static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
702 					const char *location);
703 static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
704 					struct tevent_immediate *im,
/*
 * SMBtrans completed: stash the received fragment in the read buffer,
 * complete the coupled write request, and continue the read via an
 * immediate (so the writer's callback runs first).
 */
707 static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
709 	struct tevent_req *req =
710 		tevent_req_callback_data(subreq, struct tevent_req);
711 	struct tstream_cli_np_readv_state *state =
712 		tevent_req_data(req, struct tstream_cli_np_readv_state);
713 	struct tstream_cli_np *cli_nps =
714 		tstream_context_data(state->stream, struct tstream_cli_np);
719 	status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
721 				&rcvbuf, 0, &received);
/*
 * BUFFER_TOO_SMALL just means more of the DCERPC fragment is pending
 * in message mode — not an error from the stream's point of view.
 */
723 	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
724 		status = NT_STATUS_OK;
726 	if (!NT_STATUS_IS_OK(status)) {
727 		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
/* Server must never exceed the fragment size we asked for. */
731 	if (received > TSTREAM_CLI_NP_BUF_SIZE) {
732 		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
/* NOTE(review): this EPIPE branch's condition (likely received == 0)
 * is not visible in this chunk. */
737 		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
741 	cli_nps->read.ofs = 0;
742 	cli_nps->read.left = received;
743 	memcpy(cli_nps->read.buf, rcvbuf, received);
746 	if (cli_nps->trans.write_req == NULL) {
747 		tstream_cli_np_readv_read_next(req);
/* Resume our own read from the event loop, then finish the writer. */
751 	tevent_schedule_immediate(state->trans.im, state->ev,
752 			tstream_cli_np_readv_trans_next, req);
754 	tevent_req_done(cli_nps->trans.write_req);
/* Immediate handler: continue draining into the caller's iovec. */
757 static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
758 					struct tevent_immediate *im,
761 	struct tevent_req *req =
762 		talloc_get_type_abort(private_data,
765 	tstream_cli_np_readv_read_next(req);
/* SMBreadX completed: same validation/copy as the trans path above. */
768 static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
770 	struct tevent_req *req =
771 		tevent_req_callback_data(subreq, struct tevent_req);
772 	struct tstream_cli_np_readv_state *state =
773 		tevent_req_data(req, struct tstream_cli_np_readv_state);
774 	struct tstream_cli_np *cli_nps =
775 		tstream_context_data(state->stream, struct tstream_cli_np);
781 	 * We must free subreq in this function as there is
782 	 * a timer event attached to it.
785 	status = cli_read_andx_recv(subreq, &received, &rcvbuf);
787 	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
790 	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
792 		 * NT_STATUS_BUFFER_TOO_SMALL means that there's
793 		 * more data to read when the named pipe is used
794 		 * in message mode (which is the case here).
796 		 * But we hide this from the caller.
798 		status = NT_STATUS_OK;
800 	if (!NT_STATUS_IS_OK(status)) {
802 		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
806 	if (received > TSTREAM_CLI_NP_BUF_SIZE) {
808 		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
/* NOTE(review): EPIPE branch condition (likely received == 0) not
 * visible in this chunk. */
814 		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
818 	cli_nps->read.ofs = 0;
819 	cli_nps->read.left = received;
820 	memcpy(cli_nps->read.buf, rcvbuf, received);
823 	tstream_cli_np_readv_read_next(req);
826 static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);
828 static void tstream_cli_np_readv_error(struct tevent_req *req);
/*
 * Fatal-error path for readv: save the original errno/location, close
 * the handle asynchronously if possible, then report via
 * tstream_cli_np_readv_error() (which also fails a coupled writer).
 */
830 static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
832 					const char *location)
834 	struct tstream_cli_np_readv_state *state =
836 		struct tstream_cli_np_readv_state);
837 	struct tstream_cli_np *cli_nps =
838 		tstream_context_data(state->stream,
839 		struct tstream_cli_np);
840 	struct tevent_req *subreq;
842 	state->error.val = error;
843 	state->error.location = location;
845 	if (!cli_state_is_connected(cli_nps->cli)) {
846 		/* return the original error */
847 		tstream_cli_np_readv_error(req);
851 	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
852 	if (subreq == NULL) {
853 		/* return the original error */
854 		tstream_cli_np_readv_error(req);
857 	tevent_req_set_callback(subreq,
858 			tstream_cli_np_readv_disconnect_done,
/* Close finished (result deliberately ignored); surface saved error. */
862 static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
864 	struct tevent_req *req =
865 		tevent_req_callback_data(subreq, struct tevent_req);
866 	struct tstream_cli_np_readv_state *state =
867 		tevent_req_data(req, struct tstream_cli_np_readv_state);
868 	struct tstream_cli_np *cli_nps =
869 		tstream_context_data(state->stream, struct tstream_cli_np);
871 	cli_close_recv(subreq);
876 	tstream_cli_np_readv_error(req);
879 static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
880 					struct tevent_immediate *im,
/*
 * Deliver the saved error.  With a coupled writer we must fail BOTH
 * requests; our own failure is deferred to an immediate so the
 * writer's callback cannot free us mid-flight.
 */
883 static void tstream_cli_np_readv_error(struct tevent_req *req)
885 	struct tstream_cli_np_readv_state *state =
887 		struct tstream_cli_np_readv_state);
888 	struct tstream_cli_np *cli_nps =
889 		tstream_context_data(state->stream,
890 		struct tstream_cli_np);
892 	if (cli_nps->trans.write_req == NULL) {
893 		/* return the original error */
894 		_tevent_req_error(req, state->error.val, state->error.location);
/* No immediate allocated (trans never started): fail directly. */
898 	if (state->trans.im == NULL) {
899 		/* return the original error */
900 		_tevent_req_error(req, state->error.val, state->error.location);
904 	tevent_schedule_immediate(state->trans.im, state->ev,
905 			tstream_cli_np_readv_error_trigger, req);
907 	/* return the original error for writev */
908 	_tevent_req_error(cli_nps->trans.write_req,
909 		state->error.val, state->error.location);
/* Immediate handler: now fail the read request itself. */
912 static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
913 					struct tevent_immediate *im,
916 	struct tevent_req *req =
917 		talloc_get_type_abort(private_data,
919 	struct tstream_cli_np_readv_state *state =
921 		struct tstream_cli_np_readv_state);
923 	/* return the original error */
924 	_tevent_req_error(req, state->error.val, state->error.location);
/* tstream ops: collect readv result (bytes read / errno). */
927 static int tstream_cli_np_readv_recv(struct tevent_req *req,
930 	struct tstream_cli_np_readv_state *state =
931 		tevent_req_data(req, struct tstream_cli_np_readv_state);
934 	ret = tsocket_simple_int_recv(req, perrno);
939 	tevent_req_received(req);
/* Async disconnect: closes the pipe fnum via cli_close_send(). */
943 struct tstream_cli_np_disconnect_state {
944 	struct tstream_context *stream;
947 static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);
949 static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
950 					struct tevent_context *ev,
951 					struct tstream_context *stream)
953 	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
954 					struct tstream_cli_np);
955 	struct tevent_req *req;
956 	struct tstream_cli_np_disconnect_state *state;
957 	struct tevent_req *subreq;
959 	req = tevent_req_create(mem_ctx, &state,
960 			struct tstream_cli_np_disconnect_state);
965 	state->stream = stream;
967 	if (!cli_state_is_connected(cli_nps->cli)) {
968 		tevent_req_error(req, ENOTCONN);
969 		return tevent_req_post(req, ev);
972 	subreq = cli_close_send(state, ev, cli_nps->cli, cli_nps->fnum);
973 	if (tevent_req_nomem(subreq, req)) {
974 		return tevent_req_post(req, ev);
976 	tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);
/* Close completed: map any NT error to EIO for the tsocket layer. */
981 static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
983 	struct tevent_req *req = tevent_req_callback_data(subreq,
985 	struct tstream_cli_np_disconnect_state *state =
986 		tevent_req_data(req, struct tstream_cli_np_disconnect_state);
987 	struct tstream_cli_np *cli_nps =
988 		tstream_context_data(state->stream, struct tstream_cli_np);
991 	status = cli_close_recv(subreq);
993 	if (!NT_STATUS_IS_OK(status)) {
994 		tevent_req_error(req, EIO);
1000 	tevent_req_done(req);
/* tstream ops: collect disconnect result. */
1003 static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
1008 	ret = tsocket_simple_int_recv(req, perrno);
1010 	tevent_req_received(req);
/* vtable wiring the static helpers above into the generic tstream API. */
1014 static const struct tstream_context_ops tstream_cli_np_ops = {
1017 	.pending_bytes = tstream_cli_np_pending_bytes,
1019 	.readv_send = tstream_cli_np_readv_send,
1020 	.readv_recv = tstream_cli_np_readv_recv,
1022 	.writev_send = tstream_cli_np_writev_send,
1023 	.writev_recv = tstream_cli_np_writev_recv,
1025 	.disconnect_send = tstream_cli_np_disconnect_send,
1026 	.disconnect_recv = tstream_cli_np_disconnect_recv,
/*
 * Wrap an ALREADY-OPEN pipe handle (@cli + @fnum) in a tstream.
 * Unlike the async open path, no NTCreate is issued here.
 *
 * NOTE(review): this chunk looks lossy — the assignments for
 * cli_nps->cli / npipe / default_timeout / trans setup and the
 * destructor hookup present in _tstream_cli_np_open_recv() are not
 * visible; confirm against the full file.
 */
1029 NTSTATUS _tstream_cli_np_existing(TALLOC_CTX *mem_ctx,
1030 				struct cli_state *cli,
1032 				struct tstream_context **_stream,
1033 				const char *location)
1035 	struct tstream_context *stream;
1036 	struct tstream_cli_np *cli_nps;
1038 	stream = tstream_context_create(mem_ctx,
1039 				&tstream_cli_np_ops,
1041 				struct tstream_cli_np,
1044 		return NT_STATUS_NO_MEMORY;
1046 	ZERO_STRUCTP(cli_nps);
1049 	cli_nps->fnum = fnum;
1052 	return NT_STATUS_OK;