2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "../libcli/smb/smb_common.h"
28 #include "../libcli/smb/smb_seal.h"
29 #include "../libcli/smb/smb_signing.h"
30 #include "../libcli/smb/read_smb.h"
31 #include "smbXcli_base.h"
32 #include "librpc/ndr/libndr.h"
36 struct sockaddr_storage local_ss;
37 struct sockaddr_storage remote_ss;
38 const char *remote_name;
40 struct tevent_queue *outgoing;
41 struct tevent_req **pending;
42 struct tevent_req *read_smb_req;
44 enum protocol_types protocol;
47 bool mandatory_signing;
50 * The incoming dispatch function should return:
51 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
52 * - NT_STATUS_OK, if no more processing is desired, e.g.
53 * the dispatch function called
55 * - All other return values disconnect the connection.
57 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
63 uint32_t capabilities;
68 uint32_t capabilities;
71 uint16_t security_mode;
80 const char *workgroup;
85 uint32_t capabilities;
90 struct smb_signing_state *signing;
91 struct smb_trans_enc_state *trans_enc;
96 uint16_t security_mode;
100 uint32_t capabilities;
101 uint16_t security_mode;
103 uint32_t max_trans_size;
104 uint32_t max_read_size;
105 uint32_t max_write_size;
115 struct smbXcli_req_state {
116 struct tevent_context *ev;
117 struct smbXcli_conn *conn;
119 uint8_t length_hdr[4];
126 /* Space for the header including the wct */
127 uint8_t hdr[HDR_VWV];
130 * For normal requests, smb1cli_req_send chooses a mid.
131 * SecondaryV trans requests need to use the mid of the primary
132 * request, so we need a place to store it.
133 * Assume it is set if != 0.
138 uint8_t bytecount_buf[2];
140 #define MAX_SMB_IOV 5
141 /* length_hdr, hdr, words, byte_count, buffers */
142 struct iovec iov[1 + 3 + MAX_SMB_IOV];
146 struct tevent_req **chained_requests;
149 NTSTATUS recv_status;
150 /* always an array of 3 talloc elements */
151 struct iovec *recv_iov;
155 const uint8_t *fixed;
161 uint8_t pad[7]; /* padding space for compounding */
163 /* always an array of 3 talloc elements */
164 struct iovec *recv_iov;
168 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
171 * NT_STATUS_OK, means we do not notify the callers
173 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
175 if (conn->smb1.trans_enc) {
176 common_free_encryption_state(&conn->smb1.trans_enc);
182 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
184 const char *remote_name,
185 enum smb_signing_setting signing_state,
186 uint32_t smb1_capabilities)
188 struct smbXcli_conn *conn = NULL;
190 struct sockaddr *sa = NULL;
194 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
199 conn->remote_name = talloc_strdup(conn, remote_name);
200 if (conn->remote_name == NULL) {
206 ss = (void *)&conn->local_ss;
207 sa = (struct sockaddr *)ss;
208 sa_length = sizeof(conn->local_ss);
209 ret = getsockname(fd, sa, &sa_length);
213 ss = (void *)&conn->remote_ss;
214 sa = (struct sockaddr *)ss;
215 sa_length = sizeof(conn->remote_ss);
216 ret = getpeername(fd, sa, &sa_length);
221 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
222 if (conn->outgoing == NULL) {
225 conn->pending = NULL;
227 conn->protocol = PROTOCOL_NONE;
229 switch (signing_state) {
230 case SMB_SIGNING_OFF:
232 conn->allow_signing = false;
233 conn->desire_signing = false;
234 conn->mandatory_signing = false;
236 case SMB_SIGNING_DEFAULT:
237 case SMB_SIGNING_IF_REQUIRED:
238 /* if the server requires it */
239 conn->allow_signing = true;
240 conn->desire_signing = false;
241 conn->mandatory_signing = false;
243 case SMB_SIGNING_REQUIRED:
245 conn->allow_signing = true;
246 conn->desire_signing = true;
247 conn->mandatory_signing = true;
251 conn->smb1.client.capabilities = smb1_capabilities;
252 conn->smb1.client.max_xmit = UINT16_MAX;
254 conn->smb1.capabilities = conn->smb1.client.capabilities;
255 conn->smb1.max_xmit = 1024;
259 /* initialise signing */
260 conn->smb1.signing = smb_signing_init(conn,
262 conn->desire_signing,
263 conn->mandatory_signing);
264 if (!conn->smb1.signing) {
268 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
269 if (conn->mandatory_signing) {
270 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
273 talloc_set_destructor(conn, smbXcli_conn_destructor);
281 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
287 if (conn->fd == -1) {
294 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
296 return conn->protocol;
299 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
301 if (conn->protocol >= PROTOCOL_SMB2_02) {
305 if (conn->smb1.capabilities & CAP_UNICODE) {
312 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
314 set_socket_options(conn->fd, options);
317 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
319 return &conn->local_ss;
322 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
324 return &conn->remote_ss;
327 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
329 return conn->remote_name;
332 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
333 const DATA_BLOB user_session_key,
334 const DATA_BLOB response)
336 return smb_signing_activate(conn->smb1.signing,
341 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
342 const uint8_t *buf, uint32_t seqnum)
344 return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
347 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
349 return smb_signing_is_active(conn->smb1.signing);
352 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
353 struct smb_trans_enc_state *es)
355 /* Replace the old state, if any. */
356 if (conn->smb1.trans_enc) {
357 common_free_encryption_state(&conn->smb1.trans_enc);
359 conn->smb1.trans_enc = es;
362 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
364 return common_encryption_on(conn->smb1.trans_enc);
368 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
370 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
371 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
373 if (NT_STATUS_IS_OK(status)) {
377 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
381 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
/**
 * Figure out if there is an andx command behind the current one
 * @param[in] buf	The smb buffer to look at
 * @param[in] ofs	The offset to the wct field that is followed by the cmd
 * @param[in] cmd	The command of the PDU at ofs
 * @retval Is there a command following?
 */
static bool smb1cli_have_andx_command(const uint8_t *buf,
				      uint16_t ofs, uint8_t cmd)
{
	uint8_t wct;
	size_t buflen = talloc_get_size(buf);

	if (!smb1cli_is_andx_req(cmd)) {
		return false;
	}

	if ((ofs == buflen-1) || (ofs == buflen)) {
		return false;
	}

	wct = CVAL(buf, ofs);
	if (wct < 2) {
		/*
		 * Not enough space for the command and a following pointer
		 */
		return false;
	}
	/* 0xff in the AndX command byte means "no further command" */
	return (CVAL(buf, ofs+1) != 0xff);
}
417 * Is the SMB command able to hold an AND_X successor
418 * @param[in] cmd The SMB command in question
419 * @retval Can we add a chained request after "cmd"?
421 bool smb1cli_is_andx_req(uint8_t cmd)
441 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
443 size_t num_pending = talloc_array_length(conn->pending);
449 result = conn->smb1.mid++;
450 if ((result == 0) || (result == 0xffff)) {
454 for (i=0; i<num_pending; i++) {
455 if (result == smb1cli_req_mid(conn->pending[i])) {
460 if (i == num_pending) {
466 void smbXcli_req_unset_pending(struct tevent_req *req)
468 struct smbXcli_req_state *state =
470 struct smbXcli_req_state);
471 struct smbXcli_conn *conn = state->conn;
472 size_t num_pending = talloc_array_length(conn->pending);
475 if (state->smb1.mid != 0) {
477 * This is a [nt]trans[2] request which waits
478 * for more than one reply.
483 talloc_set_destructor(req, NULL);
485 if (num_pending == 1) {
487 * The pending read_smb tevent_req is a child of
488 * conn->pending. So if nothing is pending anymore, we need to
489 * delete the socket read fde.
491 TALLOC_FREE(conn->pending);
492 conn->read_smb_req = NULL;
496 for (i=0; i<num_pending; i++) {
497 if (req == conn->pending[i]) {
501 if (i == num_pending) {
503 * Something's seriously broken. Just returning here is the
504 * right thing nevertheless, the point of this routine is to
505 * remove ourselves from conn->pending.
511 * Remove ourselves from the conn->pending array
513 for (; i < (num_pending - 1); i++) {
514 conn->pending[i] = conn->pending[i+1];
518 * No NULL check here, we're shrinking by sizeof(void *), and
519 * talloc_realloc just adjusts the size for this.
521 conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
526 static int smbXcli_req_destructor(struct tevent_req *req)
528 struct smbXcli_req_state *state =
530 struct smbXcli_req_state);
533 * Make sure we really remove it from
534 * the pending array on destruction.
537 smbXcli_req_unset_pending(req);
541 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
543 bool smbXcli_req_set_pending(struct tevent_req *req)
545 struct smbXcli_req_state *state =
547 struct smbXcli_req_state);
548 struct smbXcli_conn *conn;
549 struct tevent_req **pending;
554 if (!smbXcli_conn_is_connected(conn)) {
558 num_pending = talloc_array_length(conn->pending);
560 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
562 if (pending == NULL) {
565 pending[num_pending] = req;
566 conn->pending = pending;
567 talloc_set_destructor(req, smbXcli_req_destructor);
569 if (!smbXcli_conn_receive_next(conn)) {
571 * the caller should notify the current request
573 * And all other pending requests get notified
574 * by smbXcli_conn_disconnect().
576 smbXcli_req_unset_pending(req);
577 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
584 static void smbXcli_conn_received(struct tevent_req *subreq);
586 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
588 size_t num_pending = talloc_array_length(conn->pending);
589 struct tevent_req *req;
590 struct smbXcli_req_state *state;
592 if (conn->read_smb_req != NULL) {
596 if (num_pending == 0) {
597 if (conn->smb2.mid < UINT64_MAX) {
598 /* no more pending requests, so we are done for now */
603 * If there are no more SMB2 requests possible,
604 * because we are out of message ids,
605 * we need to disconnect.
607 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
611 req = conn->pending[0];
612 state = tevent_req_data(req, struct smbXcli_req_state);
615 * We're the first ones, add the read_smb request that waits for the
616 * answer from the server
618 conn->read_smb_req = read_smb_send(conn->pending, state->ev, conn->fd);
619 if (conn->read_smb_req == NULL) {
622 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
626 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
628 if (conn->fd != -1) {
634 * Cancel all pending requests. We do not do a for-loop walking
635 * conn->pending because that array changes in
636 * smbXcli_req_unset_pending.
638 while (talloc_array_length(conn->pending) > 0) {
639 struct tevent_req *req;
640 struct smbXcli_req_state *state;
642 req = conn->pending[0];
643 state = tevent_req_data(req, struct smbXcli_req_state);
646 * We're dead. No point waiting for trans2
651 smbXcli_req_unset_pending(req);
653 if (NT_STATUS_IS_OK(status)) {
654 /* do not notify the callers */
659 * we need to defer the callback, because we may notify more
662 tevent_req_defer_callback(req, state->ev);
663 tevent_req_nterror(req, status);
668 * Fetch a smb request's mid. Only valid after the request has been sent by
669 * smb1cli_req_send().
671 uint16_t smb1cli_req_mid(struct tevent_req *req)
673 struct smbXcli_req_state *state =
675 struct smbXcli_req_state);
677 if (state->smb1.mid != 0) {
678 return state->smb1.mid;
681 return SVAL(state->smb1.hdr, HDR_MID);
684 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
686 struct smbXcli_req_state *state =
688 struct smbXcli_req_state);
690 state->smb1.mid = mid;
693 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
695 struct smbXcli_req_state *state =
697 struct smbXcli_req_state);
699 return state->smb1.seqnum;
702 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
704 struct smbXcli_req_state *state =
706 struct smbXcli_req_state);
708 state->smb1.seqnum = seqnum;
/* Sum of the lengths of the first count iovec entries. */
static size_t smbXcli_iov_len(const struct iovec *iov, int count)
{
	size_t result = 0;
	int i;

	for (i=0; i<count; i++) {
		result += iov[i].iov_len;
	}
	return result;
}
721 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
722 const struct iovec *iov,
725 size_t len = smbXcli_iov_len(iov, count);
730 buf = talloc_array(mem_ctx, uint8_t, len);
735 for (i=0; i<count; i++) {
736 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
737 copied += iov[i].iov_len;
742 static void smb1cli_req_flags(enum protocol_types protocol,
743 uint32_t smb1_capabilities,
745 uint8_t additional_flags,
748 uint16_t additional_flags2,
749 uint16_t clear_flags2,
755 if (protocol >= PROTOCOL_LANMAN1) {
756 flags |= FLAG_CASELESS_PATHNAMES;
757 flags |= FLAG_CANONICAL_PATHNAMES;
760 if (protocol >= PROTOCOL_LANMAN2) {
761 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
762 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
765 if (protocol >= PROTOCOL_NT1) {
766 flags2 |= FLAGS2_IS_LONG_NAME;
768 if (smb1_capabilities & CAP_UNICODE) {
769 flags2 |= FLAGS2_UNICODE_STRINGS;
771 if (smb1_capabilities & CAP_STATUS32) {
772 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
774 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
775 flags2 |= FLAGS2_EXTENDED_SECURITY;
779 flags |= additional_flags;
780 flags &= ~clear_flags;
781 flags2 |= additional_flags2;
782 flags2 &= ~clear_flags2;
788 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
789 struct tevent_context *ev,
790 struct smbXcli_conn *conn,
792 uint8_t additional_flags,
794 uint16_t additional_flags2,
795 uint16_t clear_flags2,
796 uint32_t timeout_msec,
800 uint8_t wct, uint16_t *vwv,
802 struct iovec *bytes_iov)
804 struct tevent_req *req;
805 struct smbXcli_req_state *state;
809 if (iov_count > MAX_SMB_IOV) {
811 * Should not happen :-)
816 req = tevent_req_create(mem_ctx, &state,
817 struct smbXcli_req_state);
824 state->smb1.recv_cmd = 0xFF;
825 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
826 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
827 if (state->smb1.recv_iov == NULL) {
832 smb1cli_req_flags(conn->protocol,
833 conn->smb1.capabilities,
842 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
843 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
844 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
845 SCVAL(state->smb1.hdr, HDR_FLG, flags);
846 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
847 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
848 SSVAL(state->smb1.hdr, HDR_TID, tid);
849 SSVAL(state->smb1.hdr, HDR_PID, pid);
850 SSVAL(state->smb1.hdr, HDR_UID, uid);
851 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
852 SSVAL(state->smb1.hdr, HDR_WCT, wct);
854 state->smb1.vwv = vwv;
856 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
858 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
859 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
860 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
861 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
862 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
863 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
864 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
865 state->smb1.iov[3].iov_len = sizeof(uint16_t);
867 if (iov_count != 0) {
868 memcpy(&state->smb1.iov[4], bytes_iov,
869 iov_count * sizeof(*bytes_iov));
871 state->smb1.iov_count = iov_count + 4;
873 if (timeout_msec > 0) {
874 struct timeval endtime;
876 endtime = timeval_current_ofs_msec(timeout_msec);
877 if (!tevent_req_set_endtime(req, ev, endtime)) {
882 switch (smb_command) {
887 state->one_way = true;
891 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
892 state->one_way = true;
900 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
901 struct iovec *iov, int iov_count,
907 * Obvious optimization: Make cli_calculate_sign_mac work with struct
908 * iovec directly. MD5Update would do that just fine.
912 return NT_STATUS_INVALID_PARAMETER_MIX;
914 if (iov[0].iov_len != NBT_HDR_SIZE) {
915 return NT_STATUS_INVALID_PARAMETER_MIX;
917 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
918 return NT_STATUS_INVALID_PARAMETER_MIX;
920 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
921 return NT_STATUS_INVALID_PARAMETER_MIX;
923 if (iov[3].iov_len != sizeof(uint16_t)) {
924 return NT_STATUS_INVALID_PARAMETER_MIX;
927 buf = smbXcli_iov_concat(talloc_tos(), iov, iov_count);
929 return NT_STATUS_NO_MEMORY;
932 *seqnum = smb_signing_next_seqnum(conn->smb1.signing, false);
933 smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
934 memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);
940 static void smb1cli_req_writev_done(struct tevent_req *subreq);
941 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
945 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
946 struct smbXcli_req_state *state,
947 struct iovec *iov, int iov_count)
949 struct tevent_req *subreq;
953 if (!smbXcli_conn_is_connected(state->conn)) {
954 return NT_STATUS_CONNECTION_DISCONNECTED;
957 if (state->conn->protocol > PROTOCOL_NT1) {
958 return NT_STATUS_REVISION_MISMATCH;
962 return NT_STATUS_INVALID_PARAMETER_MIX;
964 if (iov[0].iov_len != NBT_HDR_SIZE) {
965 return NT_STATUS_INVALID_PARAMETER_MIX;
967 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
968 return NT_STATUS_INVALID_PARAMETER_MIX;
970 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
971 return NT_STATUS_INVALID_PARAMETER_MIX;
973 if (iov[3].iov_len != sizeof(uint16_t)) {
974 return NT_STATUS_INVALID_PARAMETER_MIX;
977 if (state->smb1.mid != 0) {
978 mid = state->smb1.mid;
980 mid = smb1cli_alloc_mid(state->conn);
982 SSVAL(iov[1].iov_base, HDR_MID, mid);
984 _smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));
986 status = smb1cli_conn_signv(state->conn, iov, iov_count,
987 &state->smb1.seqnum);
989 if (!NT_STATUS_IS_OK(status)) {
994 * If we supported multiple encrytion contexts
995 * here we'd look up based on tid.
997 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1000 buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1002 return NT_STATUS_NO_MEMORY;
1004 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1005 (char *)buf, &enc_buf);
1007 if (!NT_STATUS_IS_OK(status)) {
1008 DEBUG(0, ("Error in encrypting client message: %s\n",
1009 nt_errstr(status)));
1012 buf = (char *)talloc_memdup(state, enc_buf,
1013 smb_len_nbt(enc_buf)+4);
1016 return NT_STATUS_NO_MEMORY;
1018 iov[0].iov_base = (void *)buf;
1019 iov[0].iov_len = talloc_get_size(buf);
1023 if (state->conn->dispatch_incoming == NULL) {
1024 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1027 subreq = writev_send(state, state->ev, state->conn->outgoing,
1028 state->conn->fd, false, iov, iov_count);
1029 if (subreq == NULL) {
1030 return NT_STATUS_NO_MEMORY;
1032 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1033 return NT_STATUS_OK;
1036 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1037 struct tevent_context *ev,
1038 struct smbXcli_conn *conn,
1039 uint8_t smb_command,
1040 uint8_t additional_flags,
1041 uint8_t clear_flags,
1042 uint16_t additional_flags2,
1043 uint16_t clear_flags2,
1044 uint32_t timeout_msec,
1048 uint8_t wct, uint16_t *vwv,
1050 const uint8_t *bytes)
1052 struct tevent_req *req;
1056 iov.iov_base = discard_const_p(void, bytes);
1057 iov.iov_len = num_bytes;
1059 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1060 additional_flags, clear_flags,
1061 additional_flags2, clear_flags2,
1068 if (!tevent_req_is_in_progress(req)) {
1069 return tevent_req_post(req, ev);
1071 status = smb1cli_req_chain_submit(&req, 1);
1072 if (tevent_req_nterror(req, status)) {
1073 return tevent_req_post(req, ev);
1078 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1080 struct tevent_req *req =
1081 tevent_req_callback_data(subreq,
1083 struct smbXcli_req_state *state =
1084 tevent_req_data(req,
1085 struct smbXcli_req_state);
1089 nwritten = writev_recv(subreq, &err);
1090 TALLOC_FREE(subreq);
1091 if (nwritten == -1) {
1092 NTSTATUS status = map_nt_error_from_unix_common(err);
1093 smbXcli_conn_disconnect(state->conn, status);
1097 if (state->one_way) {
1098 state->inbuf = NULL;
1099 tevent_req_done(req);
1103 if (!smbXcli_req_set_pending(req)) {
1104 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
1109 static void smbXcli_conn_received(struct tevent_req *subreq)
1111 struct smbXcli_conn *conn =
1112 tevent_req_callback_data(subreq,
1113 struct smbXcli_conn);
1114 TALLOC_CTX *frame = talloc_stackframe();
1120 if (subreq != conn->read_smb_req) {
1121 DEBUG(1, ("Internal error: cli_smb_received called with "
1122 "unexpected subreq\n"));
1123 status = NT_STATUS_INTERNAL_ERROR;
1124 smbXcli_conn_disconnect(conn, status);
1128 conn->read_smb_req = NULL;
1130 received = read_smb_recv(subreq, frame, &inbuf, &err);
1131 TALLOC_FREE(subreq);
1132 if (received == -1) {
1133 status = map_nt_error_from_unix_common(err);
1134 smbXcli_conn_disconnect(conn, status);
1139 status = conn->dispatch_incoming(conn, frame, inbuf);
1141 if (NT_STATUS_IS_OK(status)) {
1143 * We should not do any more processing
1144 * as the dispatch function called
1145 * tevent_req_done().
1148 } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1150 * We got an error, so notify all pending requests
1152 smbXcli_conn_disconnect(conn, status);
1157 * We got NT_STATUS_RETRY, so we may ask for a
1158 * next incoming pdu.
1160 if (!smbXcli_conn_receive_next(conn)) {
1161 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
1165 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1166 struct iovec **piov, int *pnum_iov)
1177 buflen = smb_len_nbt(buf);
1180 hdr = buf + NBT_HDR_SIZE;
1182 if (buflen < MIN_SMB_SIZE) {
1183 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1187 * This returns iovec elements in the following order:
1202 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1204 return NT_STATUS_NO_MEMORY;
1206 iov[0].iov_base = hdr;
1207 iov[0].iov_len = HDR_WCT;
1210 cmd = CVAL(hdr, HDR_COM);
1214 size_t len = buflen - taken;
1216 struct iovec *iov_tmp;
1223 * we need at least WCT and BCC
1225 needed = sizeof(uint8_t) + sizeof(uint16_t);
1227 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1228 __location__, (int)len, (int)needed));
1233 * Now we check if the specified words are there
1235 wct = CVAL(hdr, wct_ofs);
1236 needed += wct * sizeof(uint16_t);
1238 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1239 __location__, (int)len, (int)needed));
1244 * Now we check if the specified bytes are there
1246 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
1247 bcc = SVAL(hdr, bcc_ofs);
1248 needed += bcc * sizeof(uint8_t);
1250 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1251 __location__, (int)len, (int)needed));
1256 * we allocate 2 iovec structures for words and bytes
1258 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
1260 if (iov_tmp == NULL) {
1262 return NT_STATUS_NO_MEMORY;
1265 cur = &iov[num_iov];
1268 cur[0].iov_len = wct * sizeof(uint16_t);
1269 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
1270 cur[1].iov_len = bcc * sizeof(uint8_t);
1271 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
1275 if (!smb1cli_is_andx_req(cmd)) {
1277 * If the current command does not have AndX chanining
1283 if (wct == 0 && bcc == 0) {
1285 * An empty response also ends the chain,
1286 * most likely with an error.
1292 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
1293 __location__, (int)wct, (int)cmd));
1296 cmd = CVAL(cur[0].iov_base, 0);
1299 * If it is the end of the chain we are also done.
1303 wct_ofs = SVAL(cur[0].iov_base, 2);
1305 if (wct_ofs < taken) {
1306 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1308 if (wct_ofs > buflen) {
1309 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1313 * we consumed everything up to the start of the next
1319 remaining = buflen - taken;
1321 if (remaining > 0 && num_iov >= 3) {
1323 * The last DATA block gets the remaining
1324 * bytes, this is needed to support
1325 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
1327 iov[num_iov-1].iov_len += remaining;
1331 *pnum_iov = num_iov;
1332 return NT_STATUS_OK;
1336 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1339 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1340 TALLOC_CTX *tmp_mem,
1343 struct tevent_req *req;
1344 struct smbXcli_req_state *state;
1351 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1352 struct iovec *iov = NULL;
1354 struct tevent_req **chain = NULL;
1355 size_t num_chained = 0;
1356 size_t num_responses = 0;
1358 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1359 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1360 DEBUG(10, ("Got non-SMB PDU\n"));
1361 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1365 * If we supported multiple encrytion contexts
1366 * here we'd look up based on tid.
1368 if (common_encryption_on(conn->smb1.trans_enc)
1369 && (CVAL(inbuf, 0) == 0)) {
1370 uint16_t enc_ctx_num;
1372 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1373 if (!NT_STATUS_IS_OK(status)) {
1374 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1375 nt_errstr(status)));
1379 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1380 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1382 conn->smb1.trans_enc->enc_ctx_num));
1383 return NT_STATUS_INVALID_HANDLE;
1386 status = common_decrypt_buffer(conn->smb1.trans_enc,
1388 if (!NT_STATUS_IS_OK(status)) {
1389 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1390 nt_errstr(status)));
1395 mid = SVAL(inhdr, HDR_MID);
1396 num_pending = talloc_array_length(conn->pending);
1398 for (i=0; i<num_pending; i++) {
1399 if (mid == smb1cli_req_mid(conn->pending[i])) {
1403 if (i == num_pending) {
1404 /* Dump unexpected reply */
1405 return NT_STATUS_RETRY;
1408 oplock_break = false;
1410 if (mid == 0xffff) {
1412 * Paranoia checks that this is really an oplock break request.
1414 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1415 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1416 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1417 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1418 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1420 if (!oplock_break) {
1421 /* Dump unexpected reply */
1422 return NT_STATUS_RETRY;
1426 req = conn->pending[i];
1427 state = tevent_req_data(req, struct smbXcli_req_state);
1429 if (!oplock_break /* oplock breaks are not signed */
1430 && !smb_signing_check_pdu(conn->smb1.signing,
1431 inbuf, state->smb1.seqnum+1)) {
1432 DEBUG(10, ("cli_check_sign_mac failed\n"));
1433 return NT_STATUS_ACCESS_DENIED;
1436 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1438 if (!NT_STATUS_IS_OK(status)) {
1439 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1440 nt_errstr(status)));
1444 cmd = CVAL(inhdr, HDR_COM);
1445 status = smb1cli_pull_raw_error(inhdr);
1447 if (state->smb1.chained_requests == NULL) {
1449 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1452 smbXcli_req_unset_pending(req);
1454 state->smb1.recv_cmd = cmd;
1455 state->smb1.recv_status = status;
1456 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1458 state->smb1.recv_iov[0] = iov[0];
1459 state->smb1.recv_iov[1] = iov[1];
1460 state->smb1.recv_iov[2] = iov[2];
1462 if (talloc_array_length(conn->pending) == 0) {
1463 tevent_req_done(req);
1464 return NT_STATUS_OK;
1467 tevent_req_defer_callback(req, state->ev);
1468 tevent_req_done(req);
1469 return NT_STATUS_RETRY;
1472 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
1473 num_chained = talloc_array_length(chain);
1474 num_responses = (num_iov - 1)/2;
1476 if (num_responses > num_chained) {
1477 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1480 for (i=0; i<num_chained; i++) {
1481 size_t iov_idx = 1 + (i*2);
1482 struct iovec *cur = &iov[iov_idx];
1486 state = tevent_req_data(req, struct smbXcli_req_state);
1488 smbXcli_req_unset_pending(req);
1491 * as we finish multiple requests here
1492 * we need to defer the callbacks as
1493 * they could destroy our current stack state.
1495 tevent_req_defer_callback(req, state->ev);
1497 if (i >= num_responses) {
1498 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1502 state->smb1.recv_cmd = cmd;
1504 if (i == (num_responses - 1)) {
1506 * The last request in the chain gets the status
1508 state->smb1.recv_status = status;
1510 cmd = CVAL(cur[0].iov_base, 0);
1511 state->smb1.recv_status = NT_STATUS_OK;
1514 state->inbuf = inbuf;
1517 * Note: here we use talloc_reference() in a way
1518 * that does not expose it to the caller.
1520 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1521 if (tevent_req_nomem(inbuf_ref, req)) {
1525 /* copy the related buffers */
1526 state->smb1.recv_iov[0] = iov[0];
1527 state->smb1.recv_iov[1] = cur[0];
1528 state->smb1.recv_iov[2] = cur[1];
1530 tevent_req_done(req);
1533 return NT_STATUS_RETRY;
/*
 * smb1cli_req_recv: collect the result of an async SMB1 request.
 *
 * Unpacks the received PDU (header, vwv words, byte block) from
 * state->smb1.recv_iov and validates the server status / word count
 * against the caller-supplied expected[] table.  Non-NULL output
 * pointers are filled in; ownership of the parsed iovec moves to
 * mem_ctx via *piov.
 *
 * NOTE(review): the embedded numbering shows some original lines are
 * elided in this chunk; comments describe only the visible code.
 */
1536 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1537 TALLOC_CTX *mem_ctx,
1538 struct iovec **piov,
1542 uint32_t *pvwv_offset,
1543 uint32_t *pnum_bytes,
1545 uint32_t *pbytes_offset,
1547 const struct smb1cli_req_expected_response *expected,
1548 size_t num_expected)
1550 struct smbXcli_req_state *state =
1551 tevent_req_data(req,
1552 struct smbXcli_req_state);
1553 NTSTATUS status = NT_STATUS_OK;
1554 struct iovec *recv_iov = NULL;
1555 uint8_t *hdr = NULL;
1557 uint32_t vwv_offset = 0;
1558 uint16_t *vwv = NULL;
1559 uint32_t num_bytes = 0;
1560 uint32_t bytes_offset = 0;
1561 uint8_t *bytes = NULL;
1563 bool found_status = false;
1564 bool found_size = false;
/* Pre-initialize whichever optional out parameters were passed in. */
1578 if (pvwv_offset != NULL) {
1581 if (pnum_bytes != NULL) {
1584 if (pbytes != NULL) {
1587 if (pbytes_offset != NULL) {
1590 if (pinbuf != NULL) {
/*
 * A reply buffer was received: recv_iov[0] is the SMB header,
 * recv_iov[1] the vwv words, recv_iov[2] the trailing byte block.
 */
1594 if (state->inbuf != NULL) {
1595 recv_iov = state->smb1.recv_iov;
1596 hdr = (uint8_t *)recv_iov[0].iov_base;
1597 wct = recv_iov[1].iov_len/2; /* word count = vwv length / 2 */
1598 vwv = (uint16_t *)recv_iov[1].iov_base;
1599 vwv_offset = PTR_DIFF(vwv, hdr); /* offsets are relative to the SMB header */
1600 num_bytes = recv_iov[2].iov_len;
1601 bytes = (uint8_t *)recv_iov[2].iov_base;
1602 bytes_offset = PTR_DIFF(bytes, hdr);
/*
 * A transport/internal error is only tolerated if the caller listed
 * that exact status in expected[]; anything else is reported as an
 * unexpected network error.
 */
1605 if (tevent_req_is_nterror(req, &status)) {
1606 for (i=0; i < num_expected; i++) {
1607 if (NT_STATUS_EQUAL(status, expected[i].status)) {
1608 found_status = true;
1614 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* An empty expected[] table means "accept any server status". */
1620 if (num_expected == 0) {
1621 found_status = true;
/*
 * Match the server's status and word count against each acceptable
 * (status, wct) combination in expected[].
 */
1625 status = state->smb1.recv_status;
1627 for (i=0; i < num_expected; i++) {
1628 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
1632 found_status = true;
1633 if (expected[i].wct == 0) { /* wct == 0 means "any word count" */
1638 if (expected[i].wct == wct) {
/* No acceptable combination matched: treat as a protocol violation. */
1644 if (!found_status) {
1649 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Hand the parsed reply iovec over to the caller's memory context. */
1653 *piov = talloc_move(mem_ctx, &recv_iov);
1665 if (pvwv_offset != NULL) {
1666 *pvwv_offset = vwv_offset;
1668 if (pnum_bytes != NULL) {
1669 *pnum_bytes = num_bytes;
1671 if (pbytes != NULL) {
1674 if (pbytes_offset != NULL) {
1675 *pbytes_offset = bytes_offset;
1677 if (pinbuf != NULL) {
1678 *pinbuf = state->inbuf;
/*
 * smb1cli_req_wct_ofs: compute the offset where the wct field of the
 * request following this andx chain would land.  For every request it
 * adds the length of the words+bytes iovecs (iov[2..]) and rounds the
 * running offset up to a 4-byte boundary.
 * NOTE(review): the initialization of wct_ofs and the final return are
 * elided in this chunk.
 */
1684 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
1691 for (i=0; i<num_reqs; i++) {
1692 struct smbXcli_req_state *state;
1693 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* Add this request's vwv+bytes payload length... */
1694 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
1695 state->smb1.iov_count-2);
/* ...and align the next request's wct to a 4-byte boundary. */
1696 wct_ofs = (wct_ofs + 3) & ~3;
/*
 * smb1cli_req_chain_submit: glue num_reqs SMB1 requests into one andx
 * chain and submit them as a single PDU.
 *
 * Each request except the last must be an andx command; the andx vwv
 * of request i is patched to point at the command and wct offset of
 * request i+1, and the final andx command byte is terminated with
 * 0xff.  The combined iovec shares the first request's NBT+SMB header
 * and is length-checked against the negotiated max_xmit.
 *
 * NOTE(review): the embedded numbering shows several original lines
 * (closing braces, some declarations) are elided in this chunk.
 */
1701 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
1703 struct smbXcli_req_state *first_state =
1704 tevent_req_data(reqs[0],
1705 struct smbXcli_req_state);
1706 struct smbXcli_req_state *last_state =
1707 tevent_req_data(reqs[num_reqs-1],
1708 struct smbXcli_req_state);
1709 struct smbXcli_req_state *state;
1711 size_t chain_padding = 0;
1713 struct iovec *iov = NULL;
1714 struct iovec *this_iov;
/* Fast path: a "chain" of one is just a normal single submit. */
1718 if (num_reqs == 1) {
1719 return smb1cli_req_writev_submit(reqs[0], first_state,
1720 first_state->smb1.iov,
1721 first_state->smb1.iov_count);
/* Validate every request before touching any of them. */
1725 for (i=0; i<num_reqs; i++) {
1726 if (!tevent_req_is_in_progress(reqs[i])) {
1727 return NT_STATUS_INTERNAL_ERROR;
1730 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* Need at least NBT len, SMB header, vwv and bytes iovecs. */
1732 if (state->smb1.iov_count < 4) {
1733 return NT_STATUS_INVALID_PARAMETER_MIX;
1738 * The NBT and SMB header
1751 iovlen += state->smb1.iov_count - 2;
1754 iov = talloc_zero_array(last_state, struct iovec, iovlen);
1756 return NT_STATUS_NO_MEMORY;
/* Remember the chain so one reply can complete all requests. */
1759 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
1760 last_state, reqs, sizeof(*reqs) * num_reqs);
1761 if (first_state->smb1.chained_requests == NULL) {
1763 return NT_STATUS_NO_MEMORY;
/* Running offset of the next request's wct field, starting at HDR_WCT. */
1766 wct_offset = HDR_WCT;
1769 for (i=0; i<num_reqs; i++) {
1770 size_t next_padding = 0;
1773 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/*
 * Every request but the last must be an andx command with at
 * least 2 vwv words (the andx command + offset fields).
 */
1775 if (i < num_reqs-1) {
1776 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
1777 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
1779 TALLOC_FREE(first_state->smb1.chained_requests);
1780 return NT_STATUS_INVALID_PARAMETER_MIX;
/* Advance past this request's payload (+1 for the wct byte itself). */
1784 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
1785 state->smb1.iov_count-2) + 1;
1786 if ((wct_offset % 4) != 0) {
1787 next_padding = 4 - (wct_offset % 4);
1789 wct_offset += next_padding;
1790 vwv = state->smb1.vwv;
/* Patch the andx fields to point at the next request... */
1792 if (i < num_reqs-1) {
1793 struct smbXcli_req_state *next_state =
1794 tevent_req_data(reqs[i+1],
1795 struct smbXcli_req_state);
1796 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
1798 SSVAL(vwv+1, 0, wct_offset);
1799 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
1800 /* properly end the chain */
1801 SCVAL(vwv+0, 0, 0xff);
1802 SCVAL(vwv+0, 1, 0xff);
1808 * The NBT and SMB header
1810 this_iov[0] = state->smb1.iov[0];
1811 this_iov[1] = state->smb1.iov[1];
1815 * This one is a bit subtle. We have to add
1816 * chain_padding bytes between the requests, and we
1817 * have to also include the wct field of the
1818 * subsequent requests. We use the subsequent header
1819 * for the padding, it contains the wct field in its
1822 this_iov[0].iov_len = chain_padding+1;
1823 this_iov[0].iov_base = (void *)&state->smb1.hdr[
1824 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
/* Zero the padding bytes; the last byte is the real wct field. */
1825 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
1830 * copy the words and bytes
1832 memcpy(this_iov, state->smb1.iov+2,
1833 sizeof(struct iovec) * (state->smb1.iov_count-2));
1834 this_iov += state->smb1.iov_count - 2;
1835 chain_padding = next_padding;
/* The chained PDU must still fit in the negotiated max_xmit. */
1838 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
1839 if (nbt_len > first_state->conn->smb1.max_xmit) {
1841 TALLOC_FREE(first_state->smb1.chained_requests);
1842 return NT_STATUS_INVALID_PARAMETER_MIX;
1845 status = smb1cli_req_writev_submit(reqs[0], last_state, iov, iovlen);
1846 if (!NT_STATUS_IS_OK(status)) {
1848 TALLOC_FREE(first_state->smb1.chained_requests);
/*
 * The chain was signed as one unit: propagate the last request's
 * sequence number to all earlier requests.
 */
1852 for (i=0; i < (num_reqs - 1); i++) {
1853 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1855 state->smb1.seqnum = last_state->smb1.seqnum;
1858 return NT_STATUS_OK;
1861 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
1863 return ((tevent_queue_length(conn->outgoing) != 0)
1864 || (talloc_array_length(conn->pending) != 0));
/*
 * smb2cli_req_create: allocate and pre-fill an SMB2 request without
 * submitting it (submission happens via smb2cli_req_compound_submit).
 *
 * Fills in the fixed 64-byte SMB2 header (magic, opcode, flags,
 * pid/tid/session id) and records the caller's fixed and dynamic body
 * buffers in the request state.
 *
 * NOTE(review): the embedded numbering shows some original lines
 * (parameter list entries, braces, the return) are elided here.
 */
1867 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
1868 struct tevent_context *ev,
1869 struct smbXcli_conn *conn,
1871 uint32_t additional_flags,
1872 uint32_t clear_flags,
1873 uint32_t timeout_msec,
1877 const uint8_t *fixed,
1882 struct tevent_req *req;
1883 struct smbXcli_req_state *state;
1886 req = tevent_req_create(mem_ctx, &state,
1887 struct smbXcli_req_state);
/* Three slots for the response: header, body, dynamic part. */
1895 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
1896 if (state->smb2.recv_iov == NULL) {
/* Apply the caller's flag adjustments on top of the defaults. */
1901 flags |= additional_flags;
1902 flags &= ~clear_flags;
/* Record the request body; the buffers stay owned by the caller. */
1904 state->smb2.fixed = fixed;
1905 state->smb2.fixed_len = fixed_len;
1906 state->smb2.dyn = dyn;
1907 state->smb2.dyn_len = dyn_len;
/* Fill the static parts of the 64-byte SMB2 header. */
1909 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
1910 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
1911 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, 1);
1912 SIVAL(state->smb2.hdr, SMB2_HDR_STATUS, NT_STATUS_V(NT_STATUS_OK));
1913 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
1914 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, 31); /* credits requested */
1915 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
1916 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
1917 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
1918 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
/* SMB2 CANCEL never gets a response of its own. */
1921 case SMB2_OP_CANCEL:
1922 state->one_way = true;
1926 * If this is a dummy request, it will have
1927 * UINT64_MAX as message id.
1928 * If we send on break acknowledgement,
1929 * this gets overwritten later.
1931 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
/* Arm the request timeout if the caller asked for one. */
1935 if (timeout_msec > 0) {
1936 struct timeval endtime;
1938 endtime = timeval_current_ofs_msec(timeout_msec);
1939 if (!tevent_req_set_endtime(req, ev, endtime)) {
1947 static void smb2cli_writev_done(struct tevent_req *subreq);
1948 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1949 TALLOC_CTX *tmp_mem,
/*
 * smb2cli_req_compound_submit: send one or more prepared SMB2
 * requests as a single compounded PDU.
 *
 * Assigns message ids, links the requests via SMB2_HDR_NEXT_COMMAND
 * with 8-byte alignment padding between them, prepends the NBT/TCP
 * length header and hands the whole iovec to the outgoing writev
 * queue.
 *
 * NOTE(review): the embedded numbering shows some original lines
 * (iov counter updates, braces) are elided in this chunk.
 */
1952 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
1955 struct smbXcli_req_state *state;
1956 struct tevent_req *subreq;
1958 int i, num_iov, nbt_len;
1961 * 1 for the nbt length
1962 * per request: HDR, fixed, dyn, padding
1963 * -1 because the last one does not need padding
1966 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
1968 return NT_STATUS_NO_MEMORY;
1974 for (i=0; i<num_reqs; i++) {
1979 if (!tevent_req_is_in_progress(reqs[i])) {
1980 return NT_STATUS_INTERNAL_ERROR;
1983 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1985 if (!smbXcli_conn_is_connected(state->conn)) {
1986 return NT_STATUS_CONNECTION_DISCONNECTED;
/* SMB2 requests require an SMB2 dialect (or pre-negotiation). */
1989 if ((state->conn->protocol != PROTOCOL_NONE) &&
1990 (state->conn->protocol < PROTOCOL_SMB2_02)) {
1991 return NT_STATUS_REVISION_MISMATCH;
/* mid == UINT64_MAX means the mid space is exhausted. */
1994 if (state->conn->smb2.mid == UINT64_MAX) {
1995 return NT_STATUS_CONNECTION_ABORTED;
/* Allocate the next message id for this request. */
1998 mid = state->conn->smb2.mid;
1999 state->conn->smb2.mid += 1;
2001 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
/* Header, fixed body and (optional) dynamic body iovecs. */
2003 iov[num_iov].iov_base = state->smb2.hdr;
2004 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2007 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2008 iov[num_iov].iov_len = state->smb2.fixed_len;
2011 if (state->smb2.dyn != NULL) {
2012 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2013 iov[num_iov].iov_len = state->smb2.dyn_len;
2017 reqlen = sizeof(state->smb2.hdr);
2018 reqlen += state->smb2.fixed_len;
2019 reqlen += state->smb2.dyn_len;
/*
 * All but the last request get padded to an 8-byte boundary and
 * have NEXT_COMMAND pointing at the following request's header.
 */
2021 if (i < num_reqs-1) {
2022 if ((reqlen % 8) > 0) {
2023 uint8_t pad = 8 - (reqlen % 8);
2024 iov[num_iov].iov_base = state->smb2.pad;
2025 iov[num_iov].iov_len = pad;
2029 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
/* Register the request so the dispatcher can match its reply. */
2033 ret = smbXcli_req_set_pending(reqs[i]);
2035 return NT_STATUS_NO_MEMORY;
2040 * TODO: Do signing here
/* The NBT length header lives in the first request's state. */
2043 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
2044 _smb_setlen_tcp(state->length_hdr, nbt_len);
2045 iov[0].iov_base = state->length_hdr;
2046 iov[0].iov_len = sizeof(state->length_hdr);
/* Route incoming PDUs through the SMB2 dispatcher from now on. */
2048 if (state->conn->dispatch_incoming == NULL) {
2049 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2052 subreq = writev_send(state, state->ev, state->conn->outgoing,
2053 state->conn->fd, false, iov, num_iov);
2054 if (subreq == NULL) {
2055 return NT_STATUS_NO_MEMORY;
2057 tevent_req_set_callback(subreq, smb2cli_writev_done, reqs[0]);
2058 return NT_STATUS_OK;
/*
 * smb2cli_req_send: convenience wrapper that creates a single SMB2
 * request and submits it as a compound of one.
 * NOTE(review): some parameter lines and the final return are elided
 * in this chunk.
 */
2061 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2062 struct tevent_context *ev,
2063 struct smbXcli_conn *conn,
2065 uint32_t additional_flags,
2066 uint32_t clear_flags,
2067 uint32_t timeout_msec,
2071 const uint8_t *fixed,
2076 struct tevent_req *req;
2079 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2080 additional_flags, clear_flags,
2083 fixed, fixed_len, dyn, dyn_len);
/* Creation may already have failed (e.g. timeout setup). */
2087 if (!tevent_req_is_in_progress(req)) {
2088 return tevent_req_post(req, ev);
2090 status = smb2cli_req_compound_submit(&req, 1);
2091 if (tevent_req_nterror(req, status)) {
2092 return tevent_req_post(req, ev);
/*
 * smb2cli_writev_done: completion callback for the writev of an SMB2
 * (compound) request.  A write failure tears down the connection,
 * which in turn fails every pending request.
 */
2097 static void smb2cli_writev_done(struct tevent_req *subreq)
2099 struct tevent_req *req =
2100 tevent_req_callback_data(subreq,
2102 struct smbXcli_req_state *state =
2103 tevent_req_data(req,
2104 struct smbXcli_req_state);
2108 nwritten = writev_recv(subreq, &err);
2109 TALLOC_FREE(subreq);
2110 if (nwritten == -1) {
2111 /* here, we need to notify all pending requests */
2112 NTSTATUS status = map_nt_error_from_unix_common(err);
2113 smbXcli_conn_disconnect(state->conn, status);
/*
 * smb2cli_inbuf_parse_compound: split a received NBT payload into the
 * individual SMB2 PDUs of a compound response.
 *
 * For every PDU it validates the protocol magic and the fixed header
 * size, then appends three iovecs to *piov: [header, body, dynamic
 * part].  next_command_ofs chains the PDUs; body_size has its low bit
 * masked off (the low bit only flags a dynamic part).
 *
 * NOTE(review): some original lines (iovec base/len bookkeeping,
 * loop-exit handling) are elided in this chunk.
 */
2118 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2119 struct iovec **piov, int *pnum_iov)
2129 iov = talloc_array(mem_ctx, struct iovec, num_iov);
2131 return NT_STATUS_NO_MEMORY;
2134 buflen = smb_len_tcp(buf);
2136 first_hdr = buf + NBT_HDR_SIZE;
2138 while (taken < buflen) {
2139 size_t len = buflen - taken;
2140 uint8_t *hdr = first_hdr + taken;
2143 size_t next_command_ofs;
2145 struct iovec *iov_tmp;
2148 * We need the header plus the body length field
2151 if (len < SMB2_HDR_BODY + 2) {
2152 DEBUG(10, ("%d bytes left, expected at least %d\n",
2153 (int)len, SMB2_HDR_BODY));
/* Reject anything that is not an SMB2 PDU. */
2156 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2157 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
/* The SMB2 header length field must be the fixed 64 bytes. */
2161 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2162 DEBUG(10, ("Got HDR len %d, expected %d\n",
2163 SVAL(hdr, 4), SMB2_HDR_BODY));
2168 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2169 body_size = SVAL(hdr, SMB2_HDR_BODY);
/*
 * A non-zero next_command_ofs bounds this PDU: it must lie past
 * the header+body-length field and within the remaining buffer.
 */
2171 if (next_command_ofs != 0) {
2172 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2175 if (next_command_ofs > full_size) {
2178 full_size = next_command_ofs;
2180 if (body_size < 2) {
/* The low bit of body_size only signals a dynamic part. */
2183 body_size &= 0xfffe;
2185 if (body_size > (full_size - SMB2_HDR_BODY)) {
/* Grow the iovec array by one (header, body, dyn) triple. */
2189 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2191 if (iov_tmp == NULL) {
2193 return NT_STATUS_NO_MEMORY;
2196 cur = &iov[num_iov];
2199 cur[0].iov_base = hdr;
2200 cur[0].iov_len = SMB2_HDR_BODY;
2201 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2202 cur[1].iov_len = body_size;
2203 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2204 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2210 *pnum_iov = num_iov;
2211 return NT_STATUS_OK;
/* Shared error exit for all malformed-PDU cases above. */
2215 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * smb2cli_conn_find_pending: linear scan of conn->pending for the
 * request whose SMB2 header carries the given message id.
 * NOTE(review): the return statements are elided in this chunk;
 * presumably the matching req (or NULL) is returned.
 */
2218 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2221 size_t num_pending = talloc_array_length(conn->pending);
2224 for (i=0; i<num_pending; i++) {
2225 struct tevent_req *req = conn->pending[i];
2226 struct smbXcli_req_state *state =
2227 tevent_req_data(req,
2228 struct smbXcli_req_state);
/* Compare against the mid we stamped into the request header. */
2230 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
/*
 * smb2cli_conn_dispatch_incoming: route a received (possibly
 * compound) SMB2 PDU to the pending requests waiting for it.
 *
 * Splits the buffer into (header, body, dyn) triples, matches each
 * one to a pending request by message id, validates opcode and the
 * response flag, handles async STATUS_PENDING interim responses, and
 * completes the matched requests.
 *
 * NOTE(review): some original lines (braces, continue/return paths)
 * are elided in this chunk; comments cover the visible code.
 */
2237 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2238 TALLOC_CTX *tmp_mem,
2241 struct tevent_req *req;
2242 struct smbXcli_req_state *state = NULL;
2248 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2250 if (!NT_STATUS_IS_OK(status)) {
/* Each response occupies three iovec entries: hdr, body, dyn. */
2254 for (i=0; i<num_iov; i+=3) {
2255 uint8_t *inbuf_ref = NULL;
2256 struct iovec *cur = &iov[i];
2257 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2258 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2259 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2260 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2261 uint16_t req_opcode;
/* A reply we never asked for is a protocol violation. */
2263 req = smb2cli_conn_find_pending(conn, mid);
2265 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2267 state = tevent_req_data(req, struct smbXcli_req_state);
/* The reply's opcode must match the request it answers. */
2269 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2270 if (opcode != req_opcode) {
2271 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* It must actually be a server->client response. */
2274 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2275 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * An async interim response (STATUS_PENDING) does not complete
 * the request: record the async id in our header copy so a later
 * cancel can reference it, and keep the request pending.
 */
2278 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
2279 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2280 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2281 uint32_t req_flags = IVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2282 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2284 req_flags |= SMB2_HDR_FLAG_ASYNC;
2285 SBVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2286 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
2290 smbXcli_req_unset_pending(req);
2293 * There might be more than one response
2294 * we need to defer the notifications
2296 if ((num_iov == 4) && (talloc_array_length(conn->pending) == 0)) {
2301 tevent_req_defer_callback(req, state->ev);
2305 * Note: here we use talloc_reference() in a way
2306 * that does not expose it to the caller.
2308 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
2309 if (tevent_req_nomem(inbuf_ref, req)) {
2313 /* copy the related buffers */
2314 state->smb2.recv_iov[0] = cur[0];
2315 state->smb2.recv_iov[1] = cur[1];
2316 state->smb2.recv_iov[2] = cur[2];
2318 tevent_req_done(req);
/* More incoming PDUs are expected by the read loop. */
2322 return NT_STATUS_RETRY;
2325 return NT_STATUS_OK;
2328 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2329 struct iovec **piov,
2330 const struct smb2cli_req_expected_response *expected,
2331 size_t num_expected)
2333 struct smbXcli_req_state *state =
2334 tevent_req_data(req,
2335 struct smbXcli_req_state);
2338 bool found_status = false;
2339 bool found_size = false;
2346 if (tevent_req_is_nterror(req, &status)) {
2347 for (i=0; i < num_expected; i++) {
2348 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2349 found_status = true;
2355 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
2361 if (num_expected == 0) {
2362 found_status = true;
2366 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
2367 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
2369 for (i=0; i < num_expected; i++) {
2370 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2374 found_status = true;
2375 if (expected[i].body_size == 0) {
2380 if (expected[i].body_size == body_size) {
2386 if (!found_status) {
2391 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2395 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);