2 Unix SMB/CIFS implementation.
3 client file read/write routines
4 Copyright (C) Andrew Tridgell 1994-1998
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "libsmb/libsmb.h"
22 #include "../lib/util/tevent_ntstatus.h"
23 #include "async_smb.h"
25 #include "../libcli/smb/smbXcli_base.h"
27 /****************************************************************************
28 Calculate the recommended read buffer size
29 ****************************************************************************/
30 static size_t cli_read_max_bufsize(struct cli_state *cli)
35 uint32_t useable_space = 0;
37 data_offset = HDR_VWV;
38 data_offset += wct * sizeof(uint16_t);
39 data_offset += sizeof(uint16_t); /* byte count */
40 data_offset += 1; /* pad */
42 min_space = cli_state_available_size(cli, data_offset);
44 if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP) {
45 useable_space = 0xFFFFFF - data_offset;
47 if (smb1cli_conn_signing_is_active(cli->conn)) {
51 if (smb1cli_conn_encryption_on(cli->conn)) {
56 } else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_READX) {
58 * Note: CAP_LARGE_READX also works with signing
60 useable_space = 0x1FFFF - data_offset;
62 useable_space = MIN(useable_space, UINT16_MAX);
70 /****************************************************************************
71 Calculate the recommended write buffer size
72 ****************************************************************************/
73 static size_t cli_write_max_bufsize(struct cli_state *cli,
79 uint32_t useable_space = 0;
81 data_offset = HDR_VWV;
82 data_offset += wct * sizeof(uint16_t);
83 data_offset += sizeof(uint16_t); /* byte count */
84 data_offset += 1; /* pad */
86 min_space = cli_state_available_size(cli, data_offset);
88 if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) {
89 useable_space = 0xFFFFFF - data_offset;
90 } else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_WRITEX) {
91 useable_space = 0x1FFFF - data_offset;
96 if (write_mode != 0) {
100 if (smb1cli_conn_signing_is_active(cli->conn)) {
104 if (smb1cli_conn_encryption_on(cli->conn)) {
108 if (strequal(cli->dev, "LPT1:")) {
112 return useable_space;
/*
 * Per-request state for an async read&x call.
 * NOTE(review): the member list is not visible in this excerpt; the code
 * below uses at least vwv, size, received, buf and status.
 */
struct cli_read_andx_state {

static void cli_read_andx_done(struct tevent_req *subreq);
/*
 * Create (but do not send) a read&x request, suitable for chaining.
 * On success *psmbreq receives the low-level smb request to submit.
 *
 * NOTE(review): excerpt is incomplete — the wct computation, the NULL
 * check after tevent_req_create, the else-branch bracketing around the
 * large-offset check and the final returns are not visible here.
 */
struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);

	/* Marshall the SMB_COM_READ_ANDX parameter words. */
	SCVAL(state->vwv + 0, 0, 0xFF);	/* andx command: none */
	SCVAL(state->vwv + 0, 1, 0);	/* andx reserved */
	SSVAL(state->vwv + 1, 0, 0);	/* andx offset */
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);	/* low 32 offset bits */
	SSVAL(state->vwv + 5, 0, size);	/* max count low */
	SSVAL(state->vwv + 6, 0, size);	/* min count */
	SSVAL(state->vwv + 7, 0, (size >> 16));	/* max count high */
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) {
		/* 64-bit offsets: high 32 bits go into vwv 10/11. */
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);

		/* presumably this is the else-branch for servers without
		 * CAP_LARGE_FILES — TODO confirm against upstream */
		if ((((uint64_t)offset) & 0xffffffff00000000LL) != 0) {
			DEBUG(10, ("cli_read_andx_send got large offset where "
				   "the server does not support it\n"));
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {

	tevent_req_set_callback(subreq, cli_read_andx_done, req);
176 struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
177 struct tevent_context *ev,
178 struct cli_state *cli, uint16_t fnum,
179 off_t offset, size_t size)
181 struct tevent_req *req, *subreq;
184 req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
190 status = smb1cli_req_chain_submit(&subreq, 1);
191 if (tevent_req_nterror(req, status)) {
192 return tevent_req_post(req, ev);
/*
 * Completion callback for the read&x subrequest: parse and validate the
 * server reply, then record the received byte count and a pointer into
 * the receive buffer.
 *
 * NOTE(review): excerpt is incomplete — local declarations (inbuf, wct,
 * vwv, num_bytes, bytes), the trailing cli_smb_recv() arguments and the
 * early returns after each tevent_req_nterror() are not visible here.
 */
static void cli_read_andx_done(struct tevent_req *subreq)
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);

	/* Expect at least 12 parameter words in the reply. */
	state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,

	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);

	/* size is the number of bytes the server returned.
	state->received = SVAL(vwv + 5, 0);	/* data length low */
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);	/* high */

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */
	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

	/* Data offset in vwv+6 is relative to the SMB header base. */
	state->buf = discard_const_p(uint8_t, smb_base(inbuf)) + SVAL(vwv+6, 0);

	/* Reject out-of-bounds data offsets/lengths from the server. */
	if (trans_oob(smb_len_tcp(inbuf), SVAL(vwv+6, 0), state->received)
	    || ((state->received != 0) && (state->buf < bytes))) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

	tevent_req_done(req);
251 * Pull the data out of a finished async read_and_x request. rcvbuf is
252 * talloced from the request, so better make sure that you copy it away before
253 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
257 NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
260 struct cli_read_andx_state *state = tevent_req_data(
261 req, struct cli_read_andx_state);
264 if (tevent_req_is_nterror(req, &status)) {
267 *received = state->received;
268 *rcvbuf = state->buf;
struct cli_pull_chunk;

/*
 * Overall state for a parallel pull (read) operation.
 * NOTE(review): several members (fnum, chunk_size, offsets, counters)
 * are not visible in this excerpt.
 */
struct cli_pull_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	/* Consumer callback: each in-order chunk is pushed here. */
	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	/*
	 * How many bytes did we push into "sink"?
	 */

	/*
	 * Outstanding requests
	 *
	 * The maximum is 256:
	 * - which would be a window of 256 MByte
	 *   for SMB2 with multi-credit
	 *   or smb1 unix extensions.
	 */
	uint16_t num_waiting;
	struct cli_pull_chunk *chunks;

/* One in-flight read chunk, kept in a doubly-linked list in file order. */
struct cli_pull_chunk {
	struct cli_pull_chunk *prev, *next;
	struct tevent_req *req;/* This is the main request! Not the subreq */
	struct tevent_req *subreq;

static void cli_pull_setup_chunks(struct tevent_req *req);
static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk);
static void cli_pull_chunk_done(struct tevent_req *subreq);
/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 *
 * NOTE(review): excerpt is incomplete — the trailing parameters (priv),
 * local declarations (tmp64), NULL checks, the size==0 bracketing and
 * the final return are not visible here.
 */
struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 off_t size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
	struct tevent_req *req;
	struct cli_pull_state *state;
	size_t page_size = 1024;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);

	state->start_offset = start_offset;
	state->next_offset = start_offset;
	state->remaining = size;

		/* Nothing to read: complete immediately. */
		tevent_req_done(req);
		return tevent_req_post(req, ev);

	/* Round the chunk size down to a page_size multiple. */
	state->chunk_size = cli_read_max_bufsize(cli);
	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);

	if (window_size == 0) {
		/*
		 * We use 16 MByte as default window size.
		 */
		window_size = 16 * 1024 * 1024;

	/* Number of chunks needed to cover the window, clamped to [1,256]. */
	tmp64 = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {

	tmp64 = MAX(tmp64, 1);
	tmp64 = MIN(tmp64, 256);
	state->max_chunks = tmp64;

	/*
	 * We defer the callback because of the complex
	 * substate/subfunction logic
	 */
	tevent_req_defer_callback(req, ev);

	cli_pull_setup_chunks(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
/*
 * (Re)fill the pipeline: ship existing chunks that can make progress,
 * then create new chunks until the window (max_chunks) is full or the
 * remaining byte count reaches zero. Completes the request when all
 * chunks are done.
 *
 * NOTE(review): excerpt is incomplete — the "next = chunk->next" step,
 * loop-control statements, num_chunks increment and several returns are
 * not visible here.
 */
static void cli_pull_setup_chunks(struct tevent_req *req)
	struct cli_pull_state *state =
		struct cli_pull_state);
	struct cli_pull_chunk *chunk, *next = NULL;

	for (chunk = state->chunks; chunk; chunk = next) {
		/*
		 * Note that chunk might be removed from this call.
		 */
		cli_pull_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {

	for (i = state->num_chunks; i < state->max_chunks; i++) {

		/* Don't create new chunks while others still wait to send. */
		if (state->num_waiting > 0) {

		if (state->remaining == 0) {

		chunk = talloc_zero(state, struct cli_pull_chunk);
		if (tevent_req_nomem(chunk, req)) {

		chunk->ofs = state->next_offset;
		chunk->total_size = MIN(state->remaining, state->chunk_size);
		state->next_offset += chunk->total_size;
		state->remaining -= chunk->total_size;

		DLIST_ADD_END(state->chunks, chunk, NULL);
		state->num_waiting++;

		cli_pull_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {

	if (state->remaining > 0) {

	if (state->num_chunks > 0) {

	tevent_req_done(req);
/*
 * Advance one chunk: if it is complete and at the head of the list,
 * push its data into the sink; otherwise (re)send a read&x subrequest
 * for the still-missing byte range.
 *
 * NOTE(review): excerpt is incomplete — local declarations (status, ofs,
 * size, ok), the "done" flag handling, num_chunks decrement and several
 * returns are not visible here.
 */
static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk)
	struct tevent_req *req = chunk->req;
	struct cli_pull_state *state =
		struct cli_pull_state);

		if (chunk != state->chunks) {
			/*
			 * this chunk is not the
			 * first one in the list.
			 *
			 * which means we should not
			 * push it into the sink yet.
			 */

		if (chunk->tmp_size == 0) {
			/*
			 * we got a short read, we're done
			 */
			tevent_req_done(req);

		/* Push the in-order data into the consumer callback. */
		status = state->sink((char *)chunk->buf,

		if (tevent_req_nterror(req, status)) {

		state->pushed += chunk->tmp_size;

		if (chunk->tmp_size < chunk->total_size) {
			/*
			 * we got a short read, we're done
			 */
			tevent_req_done(req);

		DLIST_REMOVE(state->chunks, chunk);
		SMB_ASSERT(state->num_chunks > 0);

	if (chunk->subreq != NULL) {

	SMB_ASSERT(state->num_waiting > 0);

	/* Re-request only the bytes not yet received for this chunk. */
	ofs = chunk->ofs + chunk->tmp_size;
	size = chunk->total_size - chunk->tmp_size;

	ok = smb1cli_conn_req_possible(state->cli->conn);

	chunk->subreq = cli_read_andx_send(chunk,

	if (tevent_req_nomem(chunk->subreq, req)) {

	tevent_req_set_callback(chunk->subreq,

	state->num_waiting--;
/*
 * Completion callback for one read&x subrequest of a pull chunk:
 * validate the byte count, accumulate partial reads into chunk->buf,
 * then drive the state machine again via cli_pull_setup_chunks().
 *
 * NOTE(review): excerpt is incomplete — local declarations (status,
 * received, buf), EOF/"done" flag handling, the first-run buffer keep
 * and several returns are not visible here.
 */
static void cli_pull_chunk_done(struct tevent_req *subreq)
	struct cli_pull_chunk *chunk =
		tevent_req_callback_data(subreq,
		struct cli_pull_chunk);
	struct tevent_req *req = chunk->req;
	struct cli_pull_state *state =
		struct cli_pull_state);
	size_t expected = chunk->total_size - chunk->tmp_size;

	chunk->subreq = NULL;

	status = cli_read_andx_recv(subreq, &received, &buf);
	if (NT_STATUS_EQUAL(status, NT_STATUS_END_OF_FILE)) {
		/* EOF is not an error for a pull; treat as short read. */
		status = NT_STATUS_OK;

	if (tevent_req_nterror(req, status)) {

	if (received > expected) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

		/*
		 * We got EOF we're done
		 */
		cli_pull_setup_chunks(req);

	if (received == chunk->total_size) {
		/*
		 * We got it in the first run.
		 *
		 * We don't call TALLOC_FREE(subreq)
		 * here and keep the returned buffer.
		 */
	} else if (chunk->buf == NULL) {
		chunk->buf = talloc_array(chunk, uint8_t, chunk->total_size);
		if (tevent_req_nomem(chunk->buf, req)) {

	if (received != chunk->total_size) {
		/* Partial read: append to our own accumulation buffer. */
		uint8_t *p = chunk->buf + chunk->tmp_size;
		memcpy(p, buf, received);

	chunk->tmp_size += received;

	if (chunk->tmp_size == chunk->total_size) {

		state->num_waiting++;

	cli_pull_setup_chunks(req);
615 NTSTATUS cli_pull_recv(struct tevent_req *req, off_t *received)
617 struct cli_pull_state *state = tevent_req_data(
618 req, struct cli_pull_state);
621 if (tevent_req_is_nterror(req, &status)) {
622 tevent_req_received(req);
625 *received = state->pushed;
626 tevent_req_received(req);
630 NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
631 off_t start_offset, off_t size, size_t window_size,
632 NTSTATUS (*sink)(char *buf, size_t n, void *priv),
633 void *priv, off_t *received)
635 TALLOC_CTX *frame = talloc_stackframe();
636 struct tevent_context *ev;
637 struct tevent_req *req;
638 NTSTATUS status = NT_STATUS_OK;
640 if (smbXcli_conn_has_async_calls(cli->conn)) {
642 * Can't use sync call while an async call is in flight
644 status = NT_STATUS_INVALID_PARAMETER;
648 ev = samba_tevent_context_init(frame);
650 status = NT_STATUS_NO_MEMORY;
654 req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
655 window_size, sink, priv);
657 status = NT_STATUS_NO_MEMORY;
661 if (!tevent_req_poll(req, ev)) {
662 status = map_nt_error_from_unix(errno);
666 status = cli_pull_recv(req, received);
672 static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
674 char **pbuf = (char **)priv;
675 memcpy(*pbuf, buf, n);
680 NTSTATUS cli_read(struct cli_state *cli, uint16_t fnum,
681 char *buf, off_t offset, size_t size,
687 status = cli_pull(cli, fnum, offset, size, size,
688 cli_read_sink, &buf, &ret);
689 if (!NT_STATUS_IS_OK(status)) {
/****************************************************************************
 write to a file using a SMBwrite and not bypassing 0 byte writes
****************************************************************************/

/*
 * NOTE(review): excerpt is incomplete — local declarations (total, vwv,
 * ret_vwv), the loop header/termination, the 3-byte header setup
 * (buffer-format byte), realloc size, error cleanup and the final return
 * are not visible here.
 */
NTSTATUS cli_smbwrite(struct cli_state *cli, uint16_t fnum, char *buf,
		      off_t offset, size_t size1, size_t *ptotal)

	/* 3 bytes prefix: buffer format byte + 16-bit data length. */
	bytes = talloc_array(talloc_tos(), uint8_t, 3);

		return NT_STATUS_NO_MEMORY;

		/* 48 is presumably the SMBwrite request overhead —
		 * TODO confirm against the wire format. */
		uint32_t usable_space = cli_state_available_size(cli, 48);
		size_t size = MIN(size1, usable_space);
		struct tevent_req *req;

		SSVAL(vwv+0, 0, fnum);
		SSVAL(vwv+1, 0, size);
		SIVAL(vwv+2, 0, offset);

		bytes = talloc_realloc(talloc_tos(), bytes, uint8_t,

			return NT_STATUS_NO_MEMORY;

		SSVAL(bytes, 1, size);
		memcpy(bytes + 3, buf + total, size);

		status = cli_smb(talloc_tos(), cli, SMBwrite, 0, 5, vwv,
				 size+3, bytes, &req, 1, NULL, &ret_vwv,

		if (!NT_STATUS_IS_OK(status)) {

		/* Server reports how much it actually wrote. */
		size = SVAL(ret_vwv+0, 0);

	if (ptotal != NULL) {
/*
 * Send a write&x request
 */

/*
 * Per-request state for an async write&x call.
 * NOTE(review): the member list is not visible in this excerpt; the code
 * below uses at least size, written, pad and iov.
 */
struct cli_write_andx_state {

static void cli_write_andx_done(struct tevent_req *subreq);
/*
 * Create (but do not send) a write&x request, suitable for chaining
 * after reqs_before. On success *psmbreq receives the low-level smb
 * request to submit.
 *
 * NOTE(review): excerpt is incomplete — the num_reqs_before parameter
 * name, NULL check after tevent_req_create, vwv declaration, several
 * parameter words, the data-offset computation and the final returns
 * are not visible here.
 */
struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct tevent_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 struct tevent_req **psmbreq)
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	/* 14 parameter words with 64-bit offsets, 12 otherwise. */
	bool bigoffset = ((smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode, wct);

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);

	/* Clamp this request to the negotiated maximum write size. */
	state->size = MIN(size, max_write);

	SCVAL(vwv+0, 0, 0xFF);	/* andx command: none */
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);	/* low 32 offset bits */
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+9, 0, (state->size>>16));	/* data length high */
	SSVAL(vwv+10, 0, state->size);	/* data length low */

	/* Compute the on-the-wire data offset, accounting for chained
	 * requests that precede this one. */
	smb1cli_req_wct_ofs(reqs_before, num_reqs_before)
	+ 1 /* the wct field */
	+ 2 /* num_bytes field */

	SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);

	/* One pad byte, then the caller's data, sent as two iovecs. */
	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = discard_const_p(void, buf);
	state->iov[1].iov_len = state->size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);

	tevent_req_set_callback(subreq, cli_write_andx_done, req);
845 struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
846 struct tevent_context *ev,
847 struct cli_state *cli, uint16_t fnum,
848 uint16_t mode, const uint8_t *buf,
849 off_t offset, size_t size)
851 struct tevent_req *req, *subreq;
854 req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
855 size, NULL, 0, &subreq);
860 status = smb1cli_req_chain_submit(&subreq, 1);
861 if (tevent_req_nterror(req, status)) {
862 return tevent_req_post(req, ev);
/*
 * Completion callback for the write&x subrequest: parse the reply and
 * record how many bytes the server acknowledged.
 *
 * NOTE(review): excerpt is incomplete — local declarations (wct, vwv,
 * status), the trailing cli_smb_recv() arguments and the early return
 * after tevent_req_nterror() are not visible here.
 */
static void cli_write_andx_done(struct tevent_req *subreq)
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);

	/* Expect at least 6 parameter words in the reply. */
	status = cli_smb_recv(subreq, state, NULL, 6, &wct, &vwv,

	if (NT_STATUS_IS_ERR(status)) {
		tevent_req_nterror(req, status);

	state->written = SVAL(vwv+2, 0);	/* count low */
	if (state->size > UINT16_MAX) {
		/*
		 * It is important that we only set the
		 * high bits only if we asked for a large write.
		 *
		 * OS/2 print shares get this wrong and may send
		 */
		state->written |= SVAL(vwv+4, 0)<<16;	/* count high */

	tevent_req_done(req);
900 NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
902 struct cli_write_andx_state *state = tevent_req_data(
903 req, struct cli_write_andx_state);
906 if (tevent_req_is_nterror(req, &status)) {
910 *pwritten = state->written;
/*
 * State for cli_writeall: loops write&x until the whole buffer is on
 * the wire. NOTE(review): remaining members (fnum, mode, buf, offset,
 * size, written) are not visible in this excerpt.
 */
struct cli_writeall_state {
	struct tevent_context *ev;
	struct cli_state *cli;

static void cli_writeall_written(struct tevent_req *req);
/*
 * Start a writeall: issue the first write&x covering the full size;
 * cli_writeall_written() re-issues writes for any remainder.
 *
 * NOTE(review): excerpt is incomplete — parameters (fnum, mode, buf),
 * NULL check after tevent_req_create, remaining state assignments, the
 * trailing size argument of cli_write_andx_send() and the final return
 * are not visible here.
 */
static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    off_t offset, size_t size)
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);

	state->offset = offset;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);

	tevent_req_set_callback(subreq, cli_writeall_written, req);
/*
 * Completion callback for one write&x of a writeall: account the bytes
 * written and, if anything remains, issue the next write&x for the
 * unwritten tail.
 *
 * NOTE(review): excerpt is incomplete — TALLOC_FREE(subreq), the
 * to_write==0 bracketing, the mode argument of the follow-up send and
 * several returns are not visible here.
 */
static void cli_writeall_written(struct tevent_req *subreq)
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);

	if (tevent_req_nterror(req, status)) {

	state->written += written;

	/* A server acking more than we asked for is a protocol error. */
	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

	to_write = state->size - state->written;

		/* Everything on the wire: done. */
		tevent_req_done(req);

	/* Issue the next write for the remaining tail. */
	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {

	tevent_req_set_callback(subreq, cli_writeall_written, req);
1001 static NTSTATUS cli_writeall_recv(struct tevent_req *req,
1004 struct cli_writeall_state *state = tevent_req_data(
1005 req, struct cli_writeall_state);
1008 if (tevent_req_is_nterror(req, &status)) {
1011 if (pwritten != NULL) {
1012 *pwritten = state->written;
1014 return NT_STATUS_OK;
1017 NTSTATUS cli_writeall(struct cli_state *cli, uint16_t fnum, uint16_t mode,
1018 const uint8_t *buf, off_t offset, size_t size,
1021 TALLOC_CTX *frame = talloc_stackframe();
1022 struct tevent_context *ev;
1023 struct tevent_req *req;
1024 NTSTATUS status = NT_STATUS_NO_MEMORY;
1026 if (smbXcli_conn_has_async_calls(cli->conn)) {
1028 * Can't use sync call while an async call is in flight
1030 status = NT_STATUS_INVALID_PARAMETER;
1033 ev = samba_tevent_context_init(frame);
1037 req = cli_writeall_send(frame, ev, cli, fnum, mode, buf, offset, size);
1041 if (!tevent_req_poll(req, ev)) {
1042 status = map_nt_error_from_unix(errno);
1045 status = cli_writeall_recv(req, pwritten);
struct cli_push_chunk;

/*
 * Overall state for a parallel push (write) operation.
 * NOTE(review): several members (fnum, mode, offsets, chunk_size, eof
 * flag) are not visible in this excerpt.
 */
struct cli_push_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	/* Producer callback: fills buf with up to n bytes, returns count. */
	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	/*
	 * Outstanding requests
	 *
	 * The maximum is 256:
	 * - which would be a window of 256 MByte
	 *   for SMB2 with multi-credit
	 *   or smb1 unix extensions.
	 */
	uint16_t max_chunks;
	uint16_t num_chunks;
	uint16_t num_waiting;
	struct cli_push_chunk *chunks;

/* One in-flight write chunk, kept in a doubly-linked list in file order. */
struct cli_push_chunk {
	struct cli_push_chunk *prev, *next;
	struct tevent_req *req;/* This is the main request! Not the subreq */
	struct tevent_req *subreq;

static void cli_push_setup_chunks(struct tevent_req *req);
static void cli_push_chunk_ship(struct cli_push_chunk *chunk);
static void cli_push_chunk_done(struct tevent_req *subreq);
/*
 * Start a parallel push: data is pulled from "source" chunk by chunk
 * and written via as many parallel write&x requests as the window
 * allows.
 *
 * NOTE(review): excerpt is incomplete — the priv parameter, local tmp64
 * declaration, NULL checks, remaining state assignments and the final
 * return are not visible here.
 */
struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
	struct tevent_req *req;
	struct cli_push_state *state;
	size_t page_size = 1024;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);

	state->start_offset = start_offset;
	state->source = source;
	state->next_offset = start_offset;

	/* Round the chunk size down to a page_size multiple.
	 * 14 is the write&x wct with 64-bit offsets. */
	state->chunk_size = cli_write_max_bufsize(cli, mode, 14);
	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);

	if (window_size == 0) {
		/*
		 * We use 16 MByte as default window size.
		 */
		window_size = 16 * 1024 * 1024;

	/* Number of chunks needed to cover the window, clamped to [1,256]. */
	tmp64 = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {

	tmp64 = MAX(tmp64, 1);
	tmp64 = MIN(tmp64, 256);
	state->max_chunks = tmp64;

	/*
	 * We defer the callback because of the complex
	 * substate/subfunction logic
	 */
	tevent_req_defer_callback(req, ev);

	cli_push_setup_chunks(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
/*
 * (Re)fill the pipeline: ship existing chunks that can make progress,
 * then create new chunks filled from "source" until the window is full
 * or the source is exhausted. Completes the request when all chunks are
 * done.
 *
 * NOTE(review): excerpt is incomplete — the "next = chunk->next" step,
 * the eof handling, loop-control statements and several returns are not
 * visible here.
 */
static void cli_push_setup_chunks(struct tevent_req *req)
	struct cli_push_state *state =
		tevent_req_data(req,
		struct cli_push_state);
	struct cli_push_chunk *chunk, *next = NULL;

	for (chunk = state->chunks; chunk; chunk = next) {
		/*
		 * Note that chunk might be removed from this call.
		 */
		cli_push_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {

	for (i = state->num_chunks; i < state->max_chunks; i++) {

		/* Don't create new chunks while others still wait to send. */
		if (state->num_waiting > 0) {

		chunk = talloc_zero(state, struct cli_push_chunk);
		if (tevent_req_nomem(chunk, req)) {

		chunk->ofs = state->next_offset;
		chunk->buf = talloc_array(chunk,

		if (tevent_req_nomem(chunk->buf, req)) {

		/* Let the producer fill the chunk; 0 means end of data. */
		chunk->total_size = state->source(chunk->buf,

		if (chunk->total_size == 0) {
			/* nothing to send */

		state->next_offset += chunk->total_size;

		DLIST_ADD_END(state->chunks, chunk, NULL);
		state->num_chunks++;
		state->num_waiting++;

		cli_push_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {

	if (state->num_chunks > 0) {

	tevent_req_done(req);
/*
 * Advance one chunk: if it is fully written, remove it from the list;
 * otherwise (re)send a write&x subrequest for the still-unwritten byte
 * range.
 *
 * NOTE(review): excerpt is incomplete — local declarations (buf, ofs,
 * size, ok), the "done" flag handling, the write&x argument list and
 * several returns are not visible here.
 */
static void cli_push_chunk_ship(struct cli_push_chunk *chunk)
	struct tevent_req *req = chunk->req;
	struct cli_push_state *state =
		tevent_req_data(req,
		struct cli_push_state);

		DLIST_REMOVE(state->chunks, chunk);
		SMB_ASSERT(state->num_chunks > 0);
		state->num_chunks--;

	if (chunk->subreq != NULL) {

	SMB_ASSERT(state->num_waiting > 0);

	/* Re-send only the bytes not yet acknowledged for this chunk. */
	buf = chunk->buf + chunk->tmp_size;
	ofs = chunk->ofs + chunk->tmp_size;
	size = chunk->total_size - chunk->tmp_size;

	ok = smb1cli_conn_req_possible(state->cli->conn);

	chunk->subreq = cli_write_andx_send(chunk,

	if (tevent_req_nomem(chunk->subreq, req)) {

	tevent_req_set_callback(chunk->subreq,
				cli_push_chunk_done,

	state->num_waiting--;
/*
 * Completion callback for one write&x subrequest of a push chunk:
 * validate the acknowledged byte count, account partial writes, then
 * drive the state machine again via cli_push_setup_chunks().
 *
 * NOTE(review): excerpt is incomplete — local declarations (status,
 * written), the written==0 check guarding the second nterror, the
 * "done" flag handling and several returns are not visible here.
 */
static void cli_push_chunk_done(struct tevent_req *subreq)
	struct cli_push_chunk *chunk =
		tevent_req_callback_data(subreq,
		struct cli_push_chunk);
	struct tevent_req *req = chunk->req;
	struct cli_push_state *state =
		tevent_req_data(req,
		struct cli_push_state);
	size_t expected = chunk->total_size - chunk->tmp_size;

	chunk->subreq = NULL;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {

	/* A server acking more than we sent is a protocol error. */
	if (written > expected) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

	chunk->tmp_size += written;

	if (chunk->tmp_size == chunk->total_size) {

		state->num_waiting++;

	cli_push_setup_chunks(req);
1326 NTSTATUS cli_push_recv(struct tevent_req *req)
1328 return tevent_req_simple_recv_ntstatus(req);
/*
 * Synchronous wrapper around cli_push_send()/cli_push_recv(): write all
 * data produced by "source" starting at start_offset.
 *
 * NOTE(review): the excerpt ends inside this function — the priv
 * parameter, NULL checks, the fail label and the cleanup/return tail
 * are not visible here.
 */
NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;

	ev = samba_tevent_context_init(frame);

		status = NT_STATUS_NO_MEMORY;

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);

		status = NT_STATUS_NO_MEMORY;

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);

	status = cli_push_recv(req);