2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "lib/util/dlinklist.h"
28 #include "../libcli/smb/smb_common.h"
29 #include "../libcli/smb/smb_seal.h"
30 #include "../libcli/smb/smb_signing.h"
31 #include "../libcli/smb/read_smb.h"
32 #include "smbXcli_base.h"
33 #include "librpc/ndr/libndr.h"
/*
 * NOTE(review): this chunk is a sampled listing — each line carries its
 * original line number and many lines (including the "struct smbXcli_conn {"
 * opener and the closing braces) are missing. Lines kept byte-identical;
 * comments added only.
 */
37 struct smbXcli_session;
/* --- fields of struct smbXcli_conn (opener not visible in this chunk) --- */
/* Local and remote transport endpoints, captured at connect time. */
42 struct sockaddr_storage local_ss;
43 struct sockaddr_storage remote_ss;
44 const char *remote_name;
/* Outgoing PDUs are serialized through this queue; pending holds the
 * in-flight requests; read_smb_req is the single outstanding read. */
46 struct tevent_queue *outgoing;
47 struct tevent_req **pending;
48 struct tevent_req *read_smb_req;
50 enum protocol_types protocol;
53 bool mandatory_signing;
56 * The incoming dispatch function should return:
57 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
58 * - NT_STATUS_OK, if no more processing is desired, e.g.
59 * the dispatch function called
61 * - All other return values disconnect the connection.
63 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
/* SMB1 client-side and negotiated capabilities (repeated "capabilities"
 * fields presumably belong to nested client/server sub-structs — the
 * struct openers are sampled out; confirm against the full file). */
69 uint32_t capabilities;
74 uint32_t capabilities;
77 uint16_t security_mode;
86 const char *workgroup;
92 uint32_t capabilities;
/* SMB1 signing and transport-encryption state. */
97 struct smb_signing_state *signing;
98 struct smb_trans_enc_state *trans_enc;
100 struct tevent_req *read_braw_req;
/* SMB2 client/server negotiate parameters. */
105 uint32_t capabilities;
106 uint16_t security_mode;
111 uint32_t capabilities;
112 uint16_t security_mode;
114 uint32_t max_trans_size;
115 uint32_t max_read_size;
116 uint32_t max_write_size;
/* SMB2 crediting state. */
123 uint16_t cur_credits;
124 uint16_t max_credits;
127 struct smbXcli_session *sessions;
/* Per-session state; doubly linked into conn->sessions. */
130 struct smbXcli_session {
131 struct smbXcli_session *prev, *next;
132 struct smbXcli_conn *conn;
136 uint16_t session_flags;
137 DATA_BLOB application_key;
138 DATA_BLOB signing_key;
140 DATA_BLOB channel_signing_key;
/* Per-request state carried by every tevent_req issued on a connection. */
144 struct smbXcli_req_state {
145 struct tevent_context *ev;
146 struct smbXcli_conn *conn;
147 struct smbXcli_session *session; /* maybe NULL */
/* NBT 4-byte length header preceding every SMB PDU on the wire. */
149 uint8_t length_hdr[4];
156 /* Space for the header including the wct */
157 uint8_t hdr[HDR_VWV];
160 * For normal requests, smb1cli_req_send chooses a mid.
161 * SecondaryV trans requests need to use the mid of the primary
162 * request, so we need a place to store it.
163 * Assume it is set if != 0.
168 uint8_t bytecount_buf[2];
170 #define MAX_SMB_IOV 10
171 /* length_hdr, hdr, words, byte_count, buffers */
172 struct iovec iov[1 + 3 + MAX_SMB_IOV];
177 struct tevent_req **chained_requests;
180 NTSTATUS recv_status;
181 /* always an array of 3 talloc elements */
182 struct iovec *recv_iov;
/* SMB2 request body pieces. */
186 const uint8_t *fixed;
192 uint8_t pad[7]; /* padding space for compounding */
194 /* always an array of 3 talloc elements */
195 struct iovec *recv_iov;
197 uint16_t credit_charge;
199 bool signing_skipped;
205 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
208 * NT_STATUS_OK, means we do not notify the callers
210 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
212 while (conn->sessions) {
213 conn->sessions->conn = NULL;
214 DLIST_REMOVE(conn->sessions, conn->sessions);
218 if (conn->smb1.trans_enc) {
219 TALLOC_FREE(conn->smb1.trans_enc);
223 >>>>>>> 7efc635... s3-libsmb: Convert struct smb_trans_enc_state to talloc
/*
 * Create and initialise a client connection object over an already
 * connected socket fd. Captures local/peer addresses, sets up the
 * outgoing queue, signing policy and SMB1/SMB2 defaults.
 *
 * NOTE(review): many lines (error gotos, "fail:" label, returns) are
 * missing from this sampled chunk; lines kept byte-identical.
 */
227 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
229 const char *remote_name,
230 enum smb_signing_setting signing_state,
231 uint32_t smb1_capabilities,
232 struct GUID *client_guid,
233 uint32_t smb2_capabilities)
235 struct smbXcli_conn *conn = NULL;
237 struct sockaddr *sa = NULL;
241 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
/* Duplicate the fd so reads and writes use independent descriptors. */
247 conn->write_fd = dup(fd);
248 if (conn->write_fd == -1) {
252 conn->remote_name = talloc_strdup(conn, remote_name);
253 if (conn->remote_name == NULL) {
/* Record the local socket address. */
258 ss = (void *)&conn->local_ss;
259 sa = (struct sockaddr *)ss;
260 sa_length = sizeof(conn->local_ss);
261 ret = getsockname(fd, sa, &sa_length);
/* Record the peer address. */
265 ss = (void *)&conn->remote_ss;
266 sa = (struct sockaddr *)ss;
267 sa_length = sizeof(conn->remote_ss);
268 ret = getpeername(fd, sa, &sa_length);
273 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
274 if (conn->outgoing == NULL) {
277 conn->pending = NULL;
279 conn->protocol = PROTOCOL_NONE;
/* Map the caller's signing policy onto the allow/desire/mandatory flags. */
281 switch (signing_state) {
282 case SMB_SIGNING_OFF:
284 conn->allow_signing = false;
285 conn->desire_signing = false;
286 conn->mandatory_signing = false;
288 case SMB_SIGNING_DEFAULT:
289 case SMB_SIGNING_IF_REQUIRED:
290 /* if the server requires it */
291 conn->allow_signing = true;
292 conn->desire_signing = false;
293 conn->mandatory_signing = false;
295 case SMB_SIGNING_REQUIRED:
297 conn->allow_signing = true;
298 conn->desire_signing = true;
299 conn->mandatory_signing = true;
/* SMB1 defaults until NEGOTIATE completes. */
303 conn->smb1.client.capabilities = smb1_capabilities;
304 conn->smb1.client.max_xmit = UINT16_MAX;
306 conn->smb1.capabilities = conn->smb1.client.capabilities;
307 conn->smb1.max_xmit = 1024;
311 /* initialise signing */
312 conn->smb1.signing = smb_signing_init(conn,
314 conn->desire_signing,
315 conn->mandatory_signing);
316 if (!conn->smb1.signing) {
/* SMB2 defaults. */
320 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
321 if (conn->mandatory_signing) {
322 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
325 conn->smb2.client.guid = *client_guid;
327 conn->smb2.client.capabilities = smb2_capabilities;
/* Start with a single credit until the server grants more. */
329 conn->smb2.cur_credits = 1;
330 conn->smb2.max_credits = 0;
332 talloc_set_destructor(conn, smbXcli_conn_destructor);
/* Failure path: close the duplicated fd (rest of cleanup sampled out). */
336 if (conn->write_fd != -1) {
337 close(conn->write_fd);
/*
 * Simple connection accessors. Most bodies are missing their braces and
 * return statements in this sampled chunk; lines kept byte-identical.
 */
/* Connected means we still have a usable read fd. */
343 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
349 if (conn->read_fd == -1) {
356 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
358 return conn->protocol;
/* SMB2+ is always unicode; SMB1 depends on the negotiated CAP_UNICODE. */
361 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
363 if (conn->protocol >= PROTOCOL_SMB2_02) {
367 if (conn->smb1.capabilities & CAP_UNICODE) {
374 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
376 set_socket_options(conn->read_fd, options);
379 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
381 return &conn->local_ss;
384 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
386 return &conn->remote_ss;
389 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
391 return conn->remote_name;
/* SMB1: limited by the server's max_mux; SMB2 path sampled out. */
394 uint16_t smbXcli_conn_max_requests(struct smbXcli_conn *conn)
396 if (conn->protocol >= PROTOCOL_SMB2_02) {
403 return conn->smb1.server.max_mux;
406 NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
408 if (conn->protocol >= PROTOCOL_SMB2_02) {
409 return conn->smb2.server.system_time;
412 return conn->smb1.server.system_time;
415 const DATA_BLOB *smbXcli_conn_server_gss_blob(struct smbXcli_conn *conn)
417 if (conn->protocol >= PROTOCOL_SMB2_02) {
418 return &conn->smb2.server.gss_blob;
421 return &conn->smb1.server.gss_blob;
424 const struct GUID *smbXcli_conn_server_guid(struct smbXcli_conn *conn)
426 if (conn->protocol >= PROTOCOL_SMB2_02) {
427 return &conn->smb2.server.guid;
430 return &conn->smb1.server.guid;
/*
 * Async "samba suicide" request: sends a special NBT packet carrying the
 * magic 0x74697865 ("exit" little-endian) plus an exit code, telling a
 * Samba test server to terminate itself.
 */
433 struct smbXcli_conn_samba_suicide_state {
434 struct smbXcli_conn *conn;
439 static void smbXcli_conn_samba_suicide_done(struct tevent_req *subreq);
441 struct tevent_req *smbXcli_conn_samba_suicide_send(TALLOC_CTX *mem_ctx,
442 struct tevent_context *ev,
443 struct smbXcli_conn *conn,
446 struct tevent_req *req, *subreq;
447 struct smbXcli_conn_samba_suicide_state *state;
449 req = tevent_req_create(mem_ctx, &state,
450 struct smbXcli_conn_samba_suicide_state);
/* Build the payload: magic marker at offset 4, exit code at offset 8,
 * then fix up the NBT length header. */
455 SIVAL(state->buf, 4, 0x74697865);
456 SCVAL(state->buf, 8, exitcode);
457 _smb_setlen_nbt(state->buf, sizeof(state->buf)-4);
459 state->iov.iov_base = state->buf;
460 state->iov.iov_len = sizeof(state->buf);
/* Queue the write through the connection's outgoing queue. */
462 subreq = writev_send(state, ev, conn->outgoing, conn->write_fd,
463 false, &state->iov, 1);
464 if (tevent_req_nomem(subreq, req)) {
465 return tevent_req_post(req, ev);
467 tevent_req_set_callback(subreq, smbXcli_conn_samba_suicide_done, req);
471 static void smbXcli_conn_samba_suicide_done(struct tevent_req *subreq)
473 struct tevent_req *req = tevent_req_callback_data(
474 subreq, struct tevent_req);
475 struct smbXcli_conn_samba_suicide_state *state = tevent_req_data(
476 req, struct smbXcli_conn_samba_suicide_state);
480 nwritten = writev_recv(subreq, &err);
/* A failed write kills the whole connection. */
482 if (nwritten == -1) {
483 NTSTATUS status = map_nt_error_from_unix_common(err);
484 smbXcli_conn_disconnect(state->conn, status);
487 tevent_req_done(req);
490 NTSTATUS smbXcli_conn_samba_suicide_recv(struct tevent_req *req)
492 return tevent_req_simple_recv_ntstatus(req);
/*
 * Synchronous wrapper around smbXcli_conn_samba_suicide_send/recv,
 * using a private tevent context on a stack frame.
 */
495 NTSTATUS smbXcli_conn_samba_suicide(struct smbXcli_conn *conn,
498 TALLOC_CTX *frame = talloc_stackframe();
499 struct tevent_context *ev;
500 struct tevent_req *req;
501 NTSTATUS status = NT_STATUS_NO_MEMORY;
/* Sync calls must not interleave with in-flight async requests. */
504 if (smbXcli_conn_has_async_calls(conn)) {
506 * Can't use sync call while an async call is in flight
508 status = NT_STATUS_INVALID_PARAMETER_MIX;
511 ev = tevent_context_init(frame);
515 req = smbXcli_conn_samba_suicide_send(frame, ev, conn, exitcode);
519 ok = tevent_req_poll(req, ev);
521 status = map_nt_error_from_unix_common(errno);
524 status = smbXcli_conn_samba_suicide_recv(req);
/*
 * Trivial SMB1 negotiated-state accessors (braces sampled out;
 * lines kept byte-identical).
 */
530 uint32_t smb1cli_conn_capabilities(struct smbXcli_conn *conn)
532 return conn->smb1.capabilities;
535 uint32_t smb1cli_conn_max_xmit(struct smbXcli_conn *conn)
537 return conn->smb1.max_xmit;
540 uint32_t smb1cli_conn_server_session_key(struct smbXcli_conn *conn)
542 return conn->smb1.server.session_key;
545 const uint8_t *smb1cli_conn_server_challenge(struct smbXcli_conn *conn)
547 return conn->smb1.server.challenge;
550 uint16_t smb1cli_conn_server_security_mode(struct smbXcli_conn *conn)
552 return conn->smb1.server.security_mode;
/* Server raw read/write and lockread/writeunlock feature flags. */
555 bool smb1cli_conn_server_readbraw(struct smbXcli_conn *conn)
557 return conn->smb1.server.readbraw;
560 bool smb1cli_conn_server_writebraw(struct smbXcli_conn *conn)
562 return conn->smb1.server.writebraw;
565 bool smb1cli_conn_server_lockread(struct smbXcli_conn *conn)
567 return conn->smb1.server.lockread;
570 bool smb1cli_conn_server_writeunlock(struct smbXcli_conn *conn)
572 return conn->smb1.server.writeunlock;
575 int smb1cli_conn_server_time_zone(struct smbXcli_conn *conn)
577 return conn->smb1.server.time_zone;
/*
 * Thin wrappers over the smb_signing_* API for the SMB1 signing state.
 */
580 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
581 const DATA_BLOB user_session_key,
582 const DATA_BLOB response)
/* Turns on signing once the session key material is known. */
584 return smb_signing_activate(conn->smb1.signing,
589 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
590 const uint8_t *buf, uint32_t seqnum)
/* Verifies the MAC of an incoming PDU against its sequence number. */
592 return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
595 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
597 return smb_signing_is_active(conn->smb1.signing);
600 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
601 struct smb_trans_enc_state **es)
603 /* Replace the old state, if any. */
605 if (conn->smb1.trans_enc) {
606 TALLOC_FREE(conn->smb1.trans_enc);
608 conn->smb1.trans_enc = es;
610 TALLOC_FREE(conn->smb1.trans_enc);
611 conn->smb1.trans_enc = talloc_move(conn, es);
612 >>>>>>> 7efc635... s3-libsmb: Convert struct smb_trans_enc_state to talloc
615 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
/* Encryption is on iff a trans_enc state is installed and active. */
617 return common_encryption_on(conn->smb1.trans_enc);
/*
 * Extract the error from a received SMB1 header: NTSTATUS when FLAGS2
 * says 32-bit error codes, otherwise a DOS class/code pair.
 */
621 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
623 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
624 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
626 if (NT_STATUS_IS_OK(status)) {
630 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
/* DOS error: class byte at HDR_RCLS, 16-bit code at HDR_ERR. */
634 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
638 * Is the SMB command able to hold an AND_X successor
639 * @param[in] cmd The SMB command in question
640 * @retval Can we add a chained request after "cmd"?
642 bool smb1cli_is_andx_req(uint8_t cmd)
/*
 * Allocate a fresh SMB1 mid, skipping 0 and 0xffff (reserved) and any
 * mid already used by a pending request (body partially sampled out).
 */
662 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
664 size_t num_pending = talloc_array_length(conn->pending);
670 result = conn->smb1.mid++;
671 if ((result == 0) || (result == 0xffff)) {
/* Linear scan: retry if any pending request already owns this mid. */
675 for (i=0; i<num_pending; i++) {
676 if (result == smb1cli_req_mid(conn->pending[i])) {
681 if (i == num_pending) {
/*
 * Remove a request from conn->pending and drop its destructor. When the
 * array becomes empty, the socket read request (a child of conn->pending)
 * is freed with it.
 */
687 void smbXcli_req_unset_pending(struct tevent_req *req)
689 struct smbXcli_req_state *state =
691 struct smbXcli_req_state);
692 struct smbXcli_conn *conn = state->conn;
693 size_t num_pending = talloc_array_length(conn->pending);
696 if (state->smb1.mid != 0) {
698 * This is a [nt]trans[2] request which waits
699 * for more than one reply.
704 talloc_set_destructor(req, NULL);
706 if (num_pending == 1) {
708 * The pending read_smb tevent_req is a child of
709 * conn->pending. So if nothing is pending anymore, we need to
710 * delete the socket read fde.
712 TALLOC_FREE(conn->pending);
713 conn->read_smb_req = NULL;
/* Locate this request in the pending array. */
717 for (i=0; i<num_pending; i++) {
718 if (req == conn->pending[i]) {
722 if (i == num_pending) {
724 * Something's seriously broken. Just returning here is the
725 * right thing nevertheless, the point of this routine is to
726 * remove ourselves from conn->pending.
732 * Remove ourselves from the conn->pending array
734 for (; i < (num_pending - 1); i++) {
735 conn->pending[i] = conn->pending[i+1];
739 * No NULL check here, we're shrinking by sizeof(void *), and
740 * talloc_realloc just adjusts the size for this.
742 conn->pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
/*
 * Request destructor: guarantees removal from conn->pending even if the
 * caller frees the request directly.
 */
747 static int smbXcli_req_destructor(struct tevent_req *req)
749 struct smbXcli_req_state *state =
751 struct smbXcli_req_state);
754 * Make sure we really remove it from
755 * the pending array on destruction.
758 smbXcli_req_unset_pending(req);
762 static bool smb1cli_req_cancel(struct tevent_req *req);
763 static bool smb2cli_req_cancel(struct tevent_req *req);
/*
 * Protocol-dispatching cancel: no-op on a dead or un-negotiated
 * connection, otherwise forwarded to the SMB1 or SMB2 cancel path.
 */
765 static bool smbXcli_req_cancel(struct tevent_req *req)
767 struct smbXcli_req_state *state =
769 struct smbXcli_req_state);
771 if (!smbXcli_conn_is_connected(state->conn)) {
775 if (state->conn->protocol == PROTOCOL_NONE) {
779 if (state->conn->protocol >= PROTOCOL_SMB2_02) {
780 return smb2cli_req_cancel(req);
783 return smb1cli_req_cancel(req);
786 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
/*
 * Append a request to conn->pending, arm its destructor/cancel hooks and
 * make sure a socket read is outstanding. On failure the request is
 * unset again and the connection torn down (all other pending requests
 * get notified via smbXcli_conn_disconnect()).
 */
788 bool smbXcli_req_set_pending(struct tevent_req *req)
790 struct smbXcli_req_state *state =
792 struct smbXcli_req_state);
793 struct smbXcli_conn *conn;
794 struct tevent_req **pending;
799 if (!smbXcli_conn_is_connected(conn)) {
/* Grow the pending array by one slot. */
803 num_pending = talloc_array_length(conn->pending);
805 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
807 if (pending == NULL) {
810 pending[num_pending] = req;
811 conn->pending = pending;
812 talloc_set_destructor(req, smbXcli_req_destructor);
813 tevent_req_set_cancel_fn(req, smbXcli_req_cancel);
815 if (!smbXcli_conn_receive_next(conn)) {
817 * the caller should notify the current request
819 * And all other pending requests get notified
820 * by smbXcli_conn_disconnect().
822 smbXcli_req_unset_pending(req);
823 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
830 static void smbXcli_conn_received(struct tevent_req *subreq);
/*
 * Ensure exactly one read_smb request is outstanding while anything is
 * pending. With nothing pending it is a no-op — unless SMB2 message ids
 * are exhausted, in which case the connection is aborted.
 */
832 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
834 size_t num_pending = talloc_array_length(conn->pending);
835 struct tevent_req *req;
836 struct smbXcli_req_state *state;
/* A read is already in flight — nothing to do. */
838 if (conn->read_smb_req != NULL) {
842 if (num_pending == 0) {
843 if (conn->smb2.mid < UINT64_MAX) {
844 /* no more pending requests, so we are done for now */
849 * If there are no more SMB2 requests possible,
850 * because we are out of message ids,
851 * we need to disconnect.
853 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
857 req = conn->pending[0];
858 state = tevent_req_data(req, struct smbXcli_req_state);
861 * We're the first ones, add the read_smb request that waits for the
862 * answer from the server
/* Parented on conn->pending so it dies when the array does. */
864 conn->read_smb_req = read_smb_send(conn->pending,
867 if (conn->read_smb_req == NULL) {
870 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
/*
 * Tear down the transport and fail every pending request (and any SMB1
 * chained requests behind them) with the given status. NT_STATUS_OK is
 * special-cased: callers are not notified.
 */
874 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
876 tevent_queue_stop(conn->outgoing);
878 if (conn->read_fd != -1) {
879 close(conn->read_fd);
881 if (conn->write_fd != -1) {
882 close(conn->write_fd);
888 * Cancel all pending requests. We do not do a for-loop walking
889 * conn->pending because that array changes in
890 * smbXcli_req_unset_pending.
892 while (talloc_array_length(conn->pending) > 0) {
893 struct tevent_req *req;
894 struct smbXcli_req_state *state;
895 struct tevent_req **chain;
899 req = conn->pending[0];
900 state = tevent_req_data(req, struct smbXcli_req_state);
902 if (state->smb1.chained_requests == NULL) {
904 * We're dead. No point waiting for trans2
909 smbXcli_req_unset_pending(req);
911 if (NT_STATUS_IS_OK(status)) {
912 /* do not notify the callers */
917 * we need to defer the callback, because we may notify
918 * more then one caller.
920 tevent_req_defer_callback(req, state->ev);
921 tevent_req_nterror(req, status);
/* Chained (AndX) requests: fail each member of the chain too. */
925 chain = talloc_move(conn, &state->smb1.chained_requests);
926 num_chained = talloc_array_length(chain);
928 for (i=0; i<num_chained; i++) {
930 state = tevent_req_data(req, struct smbXcli_req_state);
933 * We're dead. No point waiting for trans2
938 smbXcli_req_unset_pending(req);
940 if (NT_STATUS_IS_OK(status)) {
941 /* do not notify the callers */
946 * we need to defer the callback, because we may notify
947 * more than one caller.
949 tevent_req_defer_callback(req, state->ev);
950 tevent_req_nterror(req, status);
957 * Fetch a smb request's mid. Only valid after the request has been sent by
958 * smb1cli_req_send().
960 uint16_t smb1cli_req_mid(struct tevent_req *req)
962 struct smbXcli_req_state *state =
964 struct smbXcli_req_state);
/* An explicitly-set mid (secondary trans requests) wins over the header. */
966 if (state->smb1.mid != 0) {
967 return state->smb1.mid;
970 return SVAL(state->smb1.hdr, HDR_MID);
/* Force a specific mid (used for secondary trans and cancel requests). */
973 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
975 struct smbXcli_req_state *state =
977 struct smbXcli_req_state);
979 state->smb1.mid = mid;
/* Signing sequence number accessors for this request. */
982 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
984 struct smbXcli_req_state *state =
986 struct smbXcli_req_state);
988 return state->smb1.seqnum;
991 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
993 struct smbXcli_req_state *state =
995 struct smbXcli_req_state);
997 state->smb1.seqnum = seqnum;
/* Sum the lengths of an iovec array. */
1000 static size_t smbXcli_iov_len(const struct iovec *iov, int count)
1004 for (i=0; i<count; i++) {
1005 result += iov[i].iov_len;
/*
 * Flatten an iovec array into one talloc'ed contiguous buffer
 * (needed because signing/encryption work on a single buffer).
 */
1010 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
1011 const struct iovec *iov,
1014 size_t len = smbXcli_iov_len(iov, count);
1019 buf = talloc_array(mem_ctx, uint8_t, len);
1024 for (i=0; i<count; i++) {
1025 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
1026 copied += iov[i].iov_len;
/*
 * Compute the SMB1 FLAGS/FLAGS2 header fields for an outgoing request
 * from the negotiated protocol level and capabilities, then apply the
 * caller's additional/clear masks.
 */
1031 static void smb1cli_req_flags(enum protocol_types protocol,
1032 uint32_t smb1_capabilities,
1033 uint8_t smb_command,
1034 uint8_t additional_flags,
1035 uint8_t clear_flags,
1037 uint16_t additional_flags2,
1038 uint16_t clear_flags2,
1042 uint16_t flags2 = 0;
1044 if (protocol >= PROTOCOL_LANMAN1) {
1045 flags |= FLAG_CASELESS_PATHNAMES;
1046 flags |= FLAG_CANONICAL_PATHNAMES;
1049 if (protocol >= PROTOCOL_LANMAN2) {
1050 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
1051 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
1054 if (protocol >= PROTOCOL_NT1) {
1055 flags2 |= FLAGS2_IS_LONG_NAME;
/* NT1 capability-dependent flags. */
1057 if (smb1_capabilities & CAP_UNICODE) {
1058 flags2 |= FLAGS2_UNICODE_STRINGS;
1060 if (smb1_capabilities & CAP_STATUS32) {
1061 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
1063 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
1064 flags2 |= FLAGS2_EXTENDED_SECURITY;
/* Caller overrides: additions first, then clears. */
1068 flags |= additional_flags;
1069 flags &= ~clear_flags;
1070 flags2 |= additional_flags2;
1071 flags2 &= ~clear_flags2;
1077 static void smb1cli_req_cancel_done(struct tevent_req *subreq);
/*
 * Cancel an in-flight SMB1 request by sending an SMBntcancel carrying the
 * same pid/tid/uid/mid as the original request.
 */
1079 static bool smb1cli_req_cancel(struct tevent_req *req)
1081 struct smbXcli_req_state *state =
1082 tevent_req_data(req,
1083 struct smbXcli_req_state);
1090 struct tevent_req *subreq;
/* Mirror the identifying header fields of the request being cancelled. */
1093 flags = CVAL(state->smb1.hdr, HDR_FLG);
1094 flags2 = SVAL(state->smb1.hdr, HDR_FLG2);
1095 pid = SVAL(state->smb1.hdr, HDR_PID);
1096 pid |= SVAL(state->smb1.hdr, HDR_PIDHIGH)<<16;
1097 tid = SVAL(state->smb1.hdr, HDR_TID);
1098 uid = SVAL(state->smb1.hdr, HDR_UID);
1099 mid = SVAL(state->smb1.hdr, HDR_MID);
1101 subreq = smb1cli_req_create(state, state->ev,
1109 0, NULL); /* bytes */
1110 if (subreq == NULL) {
/* The cancel PDU must carry the original mid... */
1113 smb1cli_req_set_mid(subreq, mid);
1115 status = smb1cli_req_chain_submit(&subreq, 1);
1116 if (!NT_STATUS_IS_OK(status)) {
1117 TALLOC_FREE(subreq);
/* ...but is reset afterwards so mid bookkeeping stays clean. */
1120 smb1cli_req_set_mid(subreq, 0);
1122 tevent_req_set_callback(subreq, smb1cli_req_cancel_done, NULL);
1127 static void smb1cli_req_cancel_done(struct tevent_req *subreq)
1129 /* we do not care about the result */
1130 TALLOC_FREE(subreq);
/*
 * Build (but do not send) an SMB1 request: fills the fixed header, the
 * word vector, the byte count and the iovec array used later by
 * smb1cli_req_writev_submit(). The mid is assigned at submit time.
 */
1133 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
1134 struct tevent_context *ev,
1135 struct smbXcli_conn *conn,
1136 uint8_t smb_command,
1137 uint8_t additional_flags,
1138 uint8_t clear_flags,
1139 uint16_t additional_flags2,
1140 uint16_t clear_flags2,
1141 uint32_t timeout_msec,
1145 uint8_t wct, uint16_t *vwv,
1147 struct iovec *bytes_iov)
1149 struct tevent_req *req;
1150 struct smbXcli_req_state *state;
1152 uint16_t flags2 = 0;
/* iov[] has fixed capacity: 4 header slots + MAX_SMB_IOV byte buffers. */
1154 if (iov_count > MAX_SMB_IOV) {
1156 * Should not happen :-)
1161 req = tevent_req_create(mem_ctx, &state,
1162 struct smbXcli_req_state);
1169 state->smb1.recv_cmd = 0xFF;
1170 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
1171 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
1172 if (state->smb1.recv_iov == NULL) {
1177 smb1cli_req_flags(conn->protocol,
1178 conn->smb1.capabilities,
/* Fixed SMB1 header; HDR_MID is filled in at submit time. */
1187 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
1188 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
1189 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
1190 SCVAL(state->smb1.hdr, HDR_FLG, flags);
1191 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
1192 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
1193 SSVAL(state->smb1.hdr, HDR_TID, tid);
1194 SSVAL(state->smb1.hdr, HDR_PID, pid);
1195 SSVAL(state->smb1.hdr, HDR_UID, uid);
1196 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
1197 SCVAL(state->smb1.hdr, HDR_WCT, wct);
1199 state->smb1.vwv = vwv;
1201 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
/* iov layout: [0] NBT length, [1] header, [2] words, [3] bcc, [4..] bytes. */
1203 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
1204 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
1205 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
1206 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
1207 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
1208 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
1209 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
1210 state->smb1.iov[3].iov_len = sizeof(uint16_t);
1212 if (iov_count != 0) {
1213 memcpy(&state->smb1.iov[4], bytes_iov,
1214 iov_count * sizeof(*bytes_iov));
1216 state->smb1.iov_count = iov_count + 4;
1218 if (timeout_msec > 0) {
1219 struct timeval endtime;
1221 endtime = timeval_current_ofs_msec(timeout_msec);
1222 if (!tevent_req_set_endtime(req, ev, endtime)) {
/* Mark commands that never get a reply (e.g. oplock break releases). */
1227 switch (smb_command) {
1231 state->one_way = true;
1234 state->one_way = true;
1235 state->smb1.one_way_seqnum = true;
1239 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
1240 state->one_way = true;
/*
 * Sign an outgoing SMB1 PDU given as an iovec: validates the expected
 * iov layout, flattens it into one buffer, signs it, and copies the
 * signed header back into iov[1]. Returns the sequence number used.
 */
1248 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
1249 struct iovec *iov, int iov_count,
1251 bool one_way_seqnum)
1253 TALLOC_CTX *frame = NULL;
1257 * Obvious optimization: Make cli_calculate_sign_mac work with struct
1258 * iovec directly. MD5Update would do that just fine.
/* Sanity-check the canonical iov layout before signing. */
1261 if (iov_count < 4) {
1262 return NT_STATUS_INVALID_PARAMETER_MIX;
1264 if (iov[0].iov_len != NBT_HDR_SIZE) {
1265 return NT_STATUS_INVALID_PARAMETER_MIX;
1267 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1268 return NT_STATUS_INVALID_PARAMETER_MIX;
1270 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1271 return NT_STATUS_INVALID_PARAMETER_MIX;
1273 if (iov[3].iov_len != sizeof(uint16_t)) {
1274 return NT_STATUS_INVALID_PARAMETER_MIX;
1277 frame = talloc_stackframe();
1279 buf = smbXcli_iov_concat(frame, iov, iov_count);
1281 return NT_STATUS_NO_MEMORY;
1284 *seqnum = smb_signing_next_seqnum(conn->smb1.signing,
1286 smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
/* Copy the now-signed SMB header back (skip the 4-byte NBT header). */
1287 memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);
1290 return NT_STATUS_OK;
1293 static void smb1cli_req_writev_done(struct tevent_req *subreq);
1294 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1295 TALLOC_CTX *tmp_mem,
/*
 * Final submit path for an SMB1 request: validates the iov layout,
 * assigns a mid, fixes the NBT length, signs, optionally encrypts, and
 * queues the writev on the outgoing queue.
 */
1298 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
1299 struct smbXcli_req_state *state,
1300 struct iovec *iov, int iov_count)
1302 struct tevent_req *subreq;
1307 if (!smbXcli_conn_is_connected(state->conn)) {
1308 return NT_STATUS_CONNECTION_DISCONNECTED;
/* SMB1 submit path must not be used once SMB2+ was negotiated. */
1311 if (state->conn->protocol > PROTOCOL_NT1) {
1312 return NT_STATUS_REVISION_MISMATCH;
1315 if (iov_count < 4) {
1316 return NT_STATUS_INVALID_PARAMETER_MIX;
1318 if (iov[0].iov_len != NBT_HDR_SIZE) {
1319 return NT_STATUS_INVALID_PARAMETER_MIX;
1321 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1322 return NT_STATUS_INVALID_PARAMETER_MIX;
1324 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1325 return NT_STATUS_INVALID_PARAMETER_MIX;
1327 if (iov[3].iov_len != sizeof(uint16_t)) {
1328 return NT_STATUS_INVALID_PARAMETER_MIX;
/* read braw is a special one-at-a-time request. */
1331 cmd = CVAL(iov[1].iov_base, HDR_COM);
1332 if (cmd == SMBreadBraw) {
1333 if (smbXcli_conn_has_async_calls(state->conn)) {
1334 return NT_STATUS_INVALID_PARAMETER_MIX;
1336 state->conn->smb1.read_braw_req = req;
/* Use the forced mid if set, otherwise allocate a fresh one. */
1339 if (state->smb1.mid != 0) {
1340 mid = state->smb1.mid;
1342 mid = smb1cli_alloc_mid(state->conn);
1344 SSVAL(iov[1].iov_base, HDR_MID, mid);
1346 _smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));
1348 status = smb1cli_conn_signv(state->conn, iov, iov_count,
1349 &state->smb1.seqnum,
1350 state->smb1.one_way_seqnum);
1352 if (!NT_STATUS_IS_OK(status)) {
1357 * If we supported multiple encrytion contexts
1358 * here we'd look up based on tid.
1360 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1361 char *buf, *enc_buf;
1363 buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1365 return NT_STATUS_NO_MEMORY;
1367 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1368 (char *)buf, &enc_buf);
1370 if (!NT_STATUS_IS_OK(status)) {
1371 DEBUG(0, ("Error in encrypting client message: %s\n",
1372 nt_errstr(status)));
/* Replace the whole iov with one encrypted buffer. */
1375 buf = (char *)talloc_memdup(state, enc_buf,
1376 smb_len_nbt(enc_buf)+4);
1379 return NT_STATUS_NO_MEMORY;
1381 iov[0].iov_base = (void *)buf;
1382 iov[0].iov_len = talloc_get_size(buf);
/* Lazily install the SMB1 incoming dispatcher. */
1386 if (state->conn->dispatch_incoming == NULL) {
1387 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1390 tevent_req_set_cancel_fn(req, smbXcli_req_cancel);
1392 subreq = writev_send(state, state->ev, state->conn->outgoing,
1393 state->conn->write_fd, false, iov, iov_count);
1394 if (subreq == NULL) {
1395 return NT_STATUS_NO_MEMORY;
1397 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1398 return NT_STATUS_OK;
/*
 * Convenience wrapper: create an SMB1 request with a single bytes buffer
 * and submit it as a one-element chain.
 */
1401 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1402 struct tevent_context *ev,
1403 struct smbXcli_conn *conn,
1404 uint8_t smb_command,
1405 uint8_t additional_flags,
1406 uint8_t clear_flags,
1407 uint16_t additional_flags2,
1408 uint16_t clear_flags2,
1409 uint32_t timeout_msec,
1413 uint8_t wct, uint16_t *vwv,
1415 const uint8_t *bytes)
1417 struct tevent_req *req;
/* Wrap the caller's bytes in a single iovec. */
1421 iov.iov_base = discard_const_p(void, bytes);
1422 iov.iov_len = num_bytes;
1424 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1425 additional_flags, clear_flags,
1426 additional_flags2, clear_flags2,
/* req may already be failed (e.g. bad iov_count) — post it back. */
1433 if (!tevent_req_is_in_progress(req)) {
1434 return tevent_req_post(req, ev);
1436 status = smb1cli_req_chain_submit(&req, 1);
1437 if (tevent_req_nterror(req, status)) {
1438 return tevent_req_post(req, ev);
/*
 * Completion of the outgoing writev: on write failure the connection is
 * torn down; one-way requests complete immediately, all others are put
 * onto the pending list to await the reply.
 */
1443 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1445 struct tevent_req *req =
1446 tevent_req_callback_data(subreq,
1448 struct smbXcli_req_state *state =
1449 tevent_req_data(req,
1450 struct smbXcli_req_state);
1454 nwritten = writev_recv(subreq, &err);
1455 TALLOC_FREE(subreq);
1456 if (nwritten == -1) {
1457 NTSTATUS status = map_nt_error_from_unix_common(err);
1458 smbXcli_conn_disconnect(state->conn, status);
/* No reply expected — the request is done once it hit the wire. */
1462 if (state->one_way) {
1463 state->inbuf = NULL;
1464 tevent_req_done(req);
1468 if (!smbXcli_req_set_pending(req)) {
1469 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
/*
 * read_smb completion: hand the received PDU to the protocol dispatcher.
 * OK = done, RETRY = keep reading, anything else = disconnect.
 */
1474 static void smbXcli_conn_received(struct tevent_req *subreq)
1476 struct smbXcli_conn *conn =
1477 tevent_req_callback_data(subreq,
1478 struct smbXcli_conn);
1479 TALLOC_CTX *frame = talloc_stackframe();
/* Guard against a stale read request after reconnect/teardown. */
1485 if (subreq != conn->read_smb_req) {
1486 DEBUG(1, ("Internal error: cli_smb_received called with "
1487 "unexpected subreq\n"));
1488 status = NT_STATUS_INTERNAL_ERROR;
1489 smbXcli_conn_disconnect(conn, status);
1493 conn->read_smb_req = NULL;
1495 received = read_smb_recv(subreq, frame, &inbuf, &err);
1496 TALLOC_FREE(subreq);
1497 if (received == -1) {
1498 status = map_nt_error_from_unix_common(err);
1499 smbXcli_conn_disconnect(conn, status);
1504 status = conn->dispatch_incoming(conn, frame, inbuf);
1506 if (NT_STATUS_IS_OK(status)) {
1508 * We should not do any more processing
1509 * as the dispatch function called
1510 * tevent_req_done().
1513 } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1515 * We got an error, so notify all pending requests
1517 smbXcli_conn_disconnect(conn, status);
1522 * We got NT_STATUS_RETRY, so we may ask for a
1523 * next incoming pdu.
1525 if (!smbXcli_conn_receive_next(conn)) {
1526 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
/*
 * Parse a received SMB1 buffer (possibly an AndX chain) into an iovec
 * array: iov[0] = header up to WCT, then per-command pairs of
 * (words, bytes). Returns NT_STATUS_INVALID_NETWORK_RESPONSE on any
 * malformed length/offset.
 */
1530 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1531 struct iovec **piov, int *pnum_iov)
1542 buflen = smb_len_nbt(buf);
1545 hdr = buf + NBT_HDR_SIZE;
1547 if (buflen < MIN_SMB_SIZE) {
1548 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1552 * This returns iovec elements in the following order:
1567 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1569 return NT_STATUS_NO_MEMORY;
1571 iov[0].iov_base = hdr;
1572 iov[0].iov_len = HDR_WCT;
1575 cmd = CVAL(hdr, HDR_COM);
/* Walk the AndX chain, one (wct, vwv, bcc, bytes) unit per loop. */
1579 size_t len = buflen - taken;
1581 struct iovec *iov_tmp;
1588 * we need at least WCT and BCC
1590 needed = sizeof(uint8_t) + sizeof(uint16_t);
1592 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1593 __location__, (int)len, (int)needed));
1598 * Now we check if the specified words are there
1600 wct = CVAL(hdr, wct_ofs);
1601 needed += wct * sizeof(uint16_t);
1603 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1604 __location__, (int)len, (int)needed));
1609 * Now we check if the specified bytes are there
1611 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
1612 bcc = SVAL(hdr, bcc_ofs);
1613 needed += bcc * sizeof(uint8_t);
1615 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1616 __location__, (int)len, (int)needed));
1621 * we allocate 2 iovec structures for words and bytes
1623 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
1625 if (iov_tmp == NULL) {
1627 return NT_STATUS_NO_MEMORY;
1630 cur = &iov[num_iov];
1633 cur[0].iov_len = wct * sizeof(uint16_t);
1634 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
1635 cur[1].iov_len = bcc * sizeof(uint8_t);
1636 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
/* Chain termination conditions. */
1640 if (!smb1cli_is_andx_req(cmd)) {
1642 * If the current command does not have AndX chanining
1648 if (wct == 0 && bcc == 0) {
1650 * An empty response also ends the chain,
1651 * most likely with an error.
1657 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
1658 __location__, (int)wct, (int)cmd));
/* AndX: next command byte at vwv[0], next wct offset at vwv[1]. */
1661 cmd = CVAL(cur[0].iov_base, 0);
1664 * If it is the end of the chain we are also done.
1668 wct_ofs = SVAL(cur[0].iov_base, 2);
1670 if (wct_ofs < taken) {
1671 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1673 if (wct_ofs > buflen) {
1674 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1678 * we consumed everything up to the start of the next
1684 remaining = buflen - taken;
1686 if (remaining > 0 && num_iov >= 3) {
1688 * The last DATA block gets the remaining
1689 * bytes, this is needed to support
1690 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
1692 iov[num_iov-1].iov_len += remaining;
1696 *pnum_iov = num_iov;
1697 return NT_STATUS_OK;
1701 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Dispatch one incoming SMB1 PDU to its matching pending request(s).
 *
 * Handles, in order: a pending read-braw request (raw data, no SMB
 * header), decryption when a transport encryption context is active,
 * MID matching against conn->pending, oplock-break detection for
 * MID 0xffff, signature verification, chain parsing, and finally
 * completing either the single request or every request of an AndX
 * chain.  Returns NT_STATUS_RETRY when more PDUs are expected,
 * NT_STATUS_OK when dispatch is complete, and an error to force a
 * disconnect (see the dispatch_incoming contract in the header above).
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
1704 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1705 TALLOC_CTX *tmp_mem,
1708 struct tevent_req *req;
1709 struct smbXcli_req_state *state;
1716 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1717 struct iovec *iov = NULL;
1719 struct tevent_req **chain = NULL;
1720 size_t num_chained = 0;
1721 size_t num_responses = 0;
/* A pending SMBreadBraw gets the whole raw buffer, bypassing all
 * SMB header parsing and signing checks. */
1723 if (conn->smb1.read_braw_req != NULL) {
1724 req = conn->smb1.read_braw_req;
1725 conn->smb1.read_braw_req = NULL;
1726 state = tevent_req_data(req, struct smbXcli_req_state);
1728 smbXcli_req_unset_pending(req);
1730 if (state->smb1.recv_iov == NULL) {
1732 * For requests with more than
1733 * one response, we have to readd the
1736 state->smb1.recv_iov = talloc_zero_array(state,
1739 if (tevent_req_nomem(state->smb1.recv_iov, req)) {
1740 return NT_STATUS_OK;
1744 state->smb1.recv_iov[0].iov_base = (void *)(inbuf + NBT_HDR_SIZE);
1745 state->smb1.recv_iov[0].iov_len = smb_len_nbt(inbuf);
1746 ZERO_STRUCT(state->smb1.recv_iov[1]);
1747 ZERO_STRUCT(state->smb1.recv_iov[2]);
1749 state->smb1.recv_cmd = SMBreadBraw;
1750 state->smb1.recv_status = NT_STATUS_OK;
/* Ownership of inbuf moves to the request state. */
1751 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1753 tevent_req_done(req);
1754 return NT_STATUS_OK;
1757 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1758 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1759 DEBUG(10, ("Got non-SMB PDU\n"));
1760 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1764 * If we supported multiple encrytion contexts
1765 * here we'd look up based on tid.
1767 if (common_encryption_on(conn->smb1.trans_enc)
1768 && (CVAL(inbuf, 0) == 0)) {
1769 uint16_t enc_ctx_num;
1771 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1772 if (!NT_STATUS_IS_OK(status)) {
1773 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1774 nt_errstr(status)));
1778 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1779 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1781 conn->smb1.trans_enc->enc_ctx_num));
1782 return NT_STATUS_INVALID_HANDLE;
1785 status = common_decrypt_buffer(conn->smb1.trans_enc,
1787 if (!NT_STATUS_IS_OK(status)) {
1788 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1789 nt_errstr(status)));
/* Match the response MID against the pending request list. */
1794 mid = SVAL(inhdr, HDR_MID);
1795 num_pending = talloc_array_length(conn->pending);
1797 for (i=0; i<num_pending; i++) {
1798 if (mid == smb1cli_req_mid(conn->pending[i])) {
1802 if (i == num_pending) {
1803 /* Dump unexpected reply */
1804 return NT_STATUS_RETRY;
1807 oplock_break = false;
1809 if (mid == 0xffff) {
1811 * Paranoia checks that this is really an oplock break request.
1813 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1814 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1815 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1816 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1817 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1819 if (!oplock_break) {
1820 /* Dump unexpected reply */
1821 return NT_STATUS_RETRY;
1825 req = conn->pending[i];
1826 state = tevent_req_data(req, struct smbXcli_req_state);
1828 if (!oplock_break /* oplock breaks are not signed */
1829 && !smb_signing_check_pdu(conn->smb1.signing,
1830 inbuf, state->smb1.seqnum+1)) {
1831 DEBUG(10, ("cli_check_sign_mac failed\n"));
1832 return NT_STATUS_ACCESS_DENIED;
1835 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1837 if (!NT_STATUS_IS_OK(status)) {
1838 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1839 nt_errstr(status)));
1843 cmd = CVAL(inhdr, HDR_COM);
1844 status = smb1cli_pull_raw_error(inhdr);
/* Single (unchained) request path. */
1846 if (state->smb1.chained_requests == NULL) {
1848 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1851 smbXcli_req_unset_pending(req);
1853 if (state->smb1.recv_iov == NULL) {
1855 * For requests with more than
1856 * one response, we have to readd the
1859 state->smb1.recv_iov = talloc_zero_array(state,
1862 if (tevent_req_nomem(state->smb1.recv_iov, req)) {
1863 return NT_STATUS_OK;
1867 state->smb1.recv_cmd = cmd;
1868 state->smb1.recv_status = status;
1869 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1871 state->smb1.recv_iov[0] = iov[0];
1872 state->smb1.recv_iov[1] = iov[1];
1873 state->smb1.recv_iov[2] = iov[2];
1875 if (talloc_array_length(conn->pending) == 0) {
1876 tevent_req_done(req);
1877 return NT_STATUS_OK;
/* More requests pending: defer the callback so the caller's stack
 * stays valid, and ask for the next PDU via NT_STATUS_RETRY. */
1880 tevent_req_defer_callback(req, state->ev);
1881 tevent_req_done(req);
1882 return NT_STATUS_RETRY;
/* Chained (AndX) request path: one PDU completes several requests. */
1885 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
1886 num_chained = talloc_array_length(chain);
/* Each chain element contributed a [words][bytes] iovec pair after
 * the shared header at iov[0]. */
1887 num_responses = (num_iov - 1)/2;
1889 if (num_responses > num_chained) {
1890 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1893 for (i=0; i<num_chained; i++) {
1894 size_t iov_idx = 1 + (i*2);
1895 struct iovec *cur = &iov[iov_idx];
1899 state = tevent_req_data(req, struct smbXcli_req_state);
1901 smbXcli_req_unset_pending(req);
1904 * as we finish multiple requests here
1905 * we need to defer the callbacks as
1906 * they could destroy our current stack state.
1908 tevent_req_defer_callback(req, state->ev);
/* Chain was cut short by the server: fail the surplus requests. */
1910 if (i >= num_responses) {
1911 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1915 if (state->smb1.recv_iov == NULL) {
1917 * For requests with more than
1918 * one response, we have to readd the
1921 state->smb1.recv_iov = talloc_zero_array(state,
1924 if (tevent_req_nomem(state->smb1.recv_iov, req)) {
1929 state->smb1.recv_cmd = cmd;
1931 if (i == (num_responses - 1)) {
1933 * The last request in the chain gets the status
1935 state->smb1.recv_status = status;
1937 cmd = CVAL(cur[0].iov_base, 0);
1938 state->smb1.recv_status = NT_STATUS_OK;
1941 state->inbuf = inbuf;
1944 * Note: here we use talloc_reference() in a way
1945 * that does not expose it to the caller.
1947 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1948 if (tevent_req_nomem(inbuf_ref, req)) {
1952 /* copy the related buffers */
1953 state->smb1.recv_iov[0] = iov[0];
1954 state->smb1.recv_iov[1] = cur[0];
1955 state->smb1.recv_iov[2] = cur[1];
1957 tevent_req_done(req);
1960 return NT_STATUS_RETRY;
/*
 * Receive side of an SMB1 request: extract header/words/bytes from the
 * completed tevent_req and validate the result against the caller's
 * table of expected (status, wct) pairs.
 *
 * All output parameters are optional (NULL-checked individually).
 * recv_iov ownership moves to mem_ctx via *piov.  A response whose
 * status/wct combination matches no entry in `expected` yields
 * NT_STATUS_INVALID_NETWORK_RESPONSE; a transport-level error that is
 * not in `expected` is mapped to NT_STATUS_UNEXPECTED_NETWORK_ERROR.
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
1963 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1964 TALLOC_CTX *mem_ctx,
1965 struct iovec **piov,
1969 uint32_t *pvwv_offset,
1970 uint32_t *pnum_bytes,
1972 uint32_t *pbytes_offset,
1974 const struct smb1cli_req_expected_response *expected,
1975 size_t num_expected)
1977 struct smbXcli_req_state *state =
1978 tevent_req_data(req,
1979 struct smbXcli_req_state);
1980 NTSTATUS status = NT_STATUS_OK;
1981 struct iovec *recv_iov = NULL;
1982 uint8_t *hdr = NULL;
1984 uint32_t vwv_offset = 0;
1985 uint16_t *vwv = NULL;
1986 uint32_t num_bytes = 0;
1987 uint32_t bytes_offset = 0;
1988 uint8_t *bytes = NULL;
1990 bool found_status = false;
1991 bool found_size = false;
/* Pre-initialize the optional out parameters. */
2005 if (pvwv_offset != NULL) {
2008 if (pnum_bytes != NULL) {
2011 if (pbytes != NULL) {
2014 if (pbytes_offset != NULL) {
2017 if (pinbuf != NULL) {
/* Take ownership of the received iovec triple; SMBreadBraw replies
 * carry raw data only, so header/vwv/bytes stay NULL for them. */
2021 if (state->inbuf != NULL) {
2022 recv_iov = state->smb1.recv_iov;
2023 state->smb1.recv_iov = NULL;
2024 if (state->smb1.recv_cmd != SMBreadBraw) {
2025 hdr = (uint8_t *)recv_iov[0].iov_base;
2026 wct = recv_iov[1].iov_len/2;
2027 vwv = (uint16_t *)recv_iov[1].iov_base;
2028 vwv_offset = PTR_DIFF(vwv, hdr);
2029 num_bytes = recv_iov[2].iov_len;
2030 bytes = (uint8_t *)recv_iov[2].iov_base;
2031 bytes_offset = PTR_DIFF(bytes, hdr);
/* Transport-level error: acceptable only if listed in `expected`. */
2035 if (tevent_req_is_nterror(req, &status)) {
2036 for (i=0; i < num_expected; i++) {
2037 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2038 found_status = true;
2044 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* An empty expectation table accepts anything. */
2050 if (num_expected == 0) {
2051 found_status = true;
2055 status = state->smb1.recv_status;
2057 for (i=0; i < num_expected; i++) {
2058 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2062 found_status = true;
/* wct == 0 in the expectation means "any word count". */
2063 if (expected[i].wct == 0) {
2068 if (expected[i].wct == wct) {
2074 if (!found_status) {
2079 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Hand the buffers to the caller. */
2083 *piov = talloc_move(mem_ctx, &recv_iov);
2095 if (pvwv_offset != NULL) {
2096 *pvwv_offset = vwv_offset;
2098 if (pnum_bytes != NULL) {
2099 *pnum_bytes = num_bytes;
2101 if (pbytes != NULL) {
2104 if (pbytes_offset != NULL) {
2105 *pbytes_offset = bytes_offset;
2107 if (pinbuf != NULL) {
2108 *pinbuf = state->inbuf;
/*
 * Compute the WCT offset of the last request in an AndX chain by
 * summing the words+bytes iovec lengths (iov[2..]) of each request,
 * rounding each intermediate offset up to a 4-byte boundary.
 *
 * NOTE(review): the initial value of wct_ofs and the final return are
 * not visible in this extract; code lines are kept verbatim.
 */
2114 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
2121 for (i=0; i<num_reqs; i++) {
2122 struct smbXcli_req_state *state;
2123 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* iov[0]/iov[1] are the NBT and SMB headers; only words+bytes count. */
2124 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
2125 state->smb1.iov_count-2);
/* Round up to the next multiple of 4 (AndX padding). */
2126 wct_ofs = (wct_ofs + 3) & ~3;
/*
 * Submit several SMB1 requests as a single AndX chain.
 *
 * The first request's header/NBT length are reused for the whole chain;
 * each non-final request must be an AndX command with wct >= 2 so its
 * vwv[0]/vwv[1] can be patched with the next command code and offset.
 * A chain of one falls back to the plain writev path.  On any failure
 * after the chained_requests array was created, that array is freed so
 * the requests are not left half-linked.
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
2131 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
2133 struct smbXcli_req_state *first_state =
2134 tevent_req_data(reqs[0],
2135 struct smbXcli_req_state);
2136 struct smbXcli_req_state *state;
2138 size_t chain_padding = 0;
2140 struct iovec *iov = NULL;
2141 struct iovec *this_iov;
/* Single request: no chaining work needed. */
2145 if (num_reqs == 1) {
2146 return smb1cli_req_writev_submit(reqs[0], first_state,
2147 first_state->smb1.iov,
2148 first_state->smb1.iov_count);
/* Validate every request and size the combined iovec array. */
2152 for (i=0; i<num_reqs; i++) {
2153 if (!tevent_req_is_in_progress(reqs[i])) {
2154 return NT_STATUS_INTERNAL_ERROR;
2157 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2159 if (state->smb1.iov_count < 4) {
2160 return NT_STATUS_INVALID_PARAMETER_MIX;
2165 * The NBT and SMB header
2178 iovlen += state->smb1.iov_count - 2;
2181 iov = talloc_zero_array(first_state, struct iovec, iovlen);
2183 return NT_STATUS_NO_MEMORY;
/* Remember the chain members so the dispatcher can complete all of
 * them from the one response PDU. */
2186 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
2187 first_state, reqs, sizeof(*reqs) * num_reqs);
2188 if (first_state->smb1.chained_requests == NULL) {
2190 return NT_STATUS_NO_MEMORY;
2193 wct_offset = HDR_WCT;
2196 for (i=0; i<num_reqs; i++) {
2197 size_t next_padding = 0;
2200 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2202 if (i < num_reqs-1) {
2203 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
2204 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
2206 TALLOC_FREE(first_state->smb1.chained_requests);
2207 return NT_STATUS_INVALID_PARAMETER_MIX;
/* Advance past this request's words+bytes plus its WCT byte, then
 * pad the next WCT offset to a 4-byte boundary. */
2211 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
2212 state->smb1.iov_count-2) + 1;
2213 if ((wct_offset % 4) != 0) {
2214 next_padding = 4 - (wct_offset % 4);
2216 wct_offset += next_padding;
2217 vwv = state->smb1.vwv;
2219 if (i < num_reqs-1) {
2220 struct smbXcli_req_state *next_state =
2221 tevent_req_data(reqs[i+1],
2222 struct smbXcli_req_state);
/* Patch AndX command and offset into this request's vwv[0]/vwv[1]. */
2223 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
2225 SSVAL(vwv+1, 0, wct_offset);
2226 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
2227 /* properly end the chain */
2228 SCVAL(vwv+0, 0, 0xff);
2229 SCVAL(vwv+0, 1, 0xff);
2235 * The NBT and SMB header
2237 this_iov[0] = state->smb1.iov[0];
2238 this_iov[1] = state->smb1.iov[1];
2242 * This one is a bit subtle. We have to add
2243 * chain_padding bytes between the requests, and we
2244 * have to also include the wct field of the
2245 * subsequent requests. We use the subsequent header
2246 * for the padding, it contains the wct field in its
2249 this_iov[0].iov_len = chain_padding+1;
2250 this_iov[0].iov_base = (void *)&state->smb1.hdr[
2251 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
2252 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
2257 * copy the words and bytes
2259 memcpy(this_iov, state->smb1.iov+2,
2260 sizeof(struct iovec) * (state->smb1.iov_count-2));
2261 this_iov += state->smb1.iov_count - 2;
2262 chain_padding = next_padding;
/* The assembled chain must still fit into the negotiated max_xmit. */
2265 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
2266 if (nbt_len > first_state->conn->smb1.max_xmit) {
2268 TALLOC_FREE(first_state->smb1.chained_requests);
2269 return NT_STATUS_INVALID_PARAMETER_MIX;
2272 status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
2273 if (!NT_STATUS_IS_OK(status)) {
2275 TALLOC_FREE(first_state->smb1.chained_requests);
2279 return NT_STATUS_OK;
/*
 * True if the connection has any in-flight activity: queued outgoing
 * writes or requests still waiting for a response.
 */
2282 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
2284 return ((tevent_queue_length(conn->outgoing) != 0)
2285 || (talloc_array_length(conn->pending) != 0));
/* Accessor: SMB2 capabilities advertised by the server at negprot. */
2288 uint32_t smb2cli_conn_server_capabilities(struct smbXcli_conn *conn)
2290 return conn->smb2.server.capabilities;
/* Accessor: SMB2 security mode advertised by the server at negprot. */
2293 uint16_t smb2cli_conn_server_security_mode(struct smbXcli_conn *conn)
2295 return conn->smb2.server.security_mode;
/* Accessor: server's negotiated maximum transaction size. */
2298 uint32_t smb2cli_conn_max_trans_size(struct smbXcli_conn *conn)
2300 return conn->smb2.server.max_trans_size;
/* Accessor: server's negotiated maximum read size. */
2303 uint32_t smb2cli_conn_max_read_size(struct smbXcli_conn *conn)
2305 return conn->smb2.server.max_read_size;
/* Accessor: server's negotiated maximum write size. */
2308 uint32_t smb2cli_conn_max_write_size(struct smbXcli_conn *conn)
2310 return conn->smb2.server.max_write_size;
/* Set the upper bound of SMB2 credits this client will ask for. */
2313 void smb2cli_conn_set_max_credits(struct smbXcli_conn *conn,
2314 uint16_t max_credits)
2316 conn->smb2.max_credits = max_credits;
2319 static void smb2cli_req_cancel_done(struct tevent_req *subreq);
/*
 * tevent cancel hook for an in-flight SMB2 request: build and submit an
 * SMB2 CANCEL carrying the original request's PID/TID/MID (and async id
 * when the server already went async).  The cancel itself is fire-and-
 * forget — its result is discarded in smb2cli_req_cancel_done().
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
2321 static bool smb2cli_req_cancel(struct tevent_req *req)
2323 struct smbXcli_req_state *state =
2324 tevent_req_data(req,
2325 struct smbXcli_req_state);
2326 uint32_t flags = IVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2327 uint32_t pid = IVAL(state->smb2.hdr, SMB2_HDR_PID);
2328 uint32_t tid = IVAL(state->smb2.hdr, SMB2_HDR_TID);
2329 uint64_t mid = BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID);
2330 uint64_t aid = BVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID);
2331 struct smbXcli_session *session = state->session;
/* Reuse the state's pad area as the 4-byte fixed body of the cancel. */
2332 uint8_t *fixed = state->smb2.pad;
2333 uint16_t fixed_len = 4;
2334 struct tevent_req *subreq;
2335 struct smbXcli_req_state *substate;
/* StructureSize of the CANCEL request body is 4. */
2338 SSVAL(fixed, 0, 0x04);
2341 subreq = smb2cli_req_create(state, state->ev,
2349 if (subreq == NULL) {
2352 substate = tevent_req_data(subreq, struct smbXcli_req_state);
2354 if (flags & SMB2_HDR_FLAG_ASYNC) {
/* Copy the identifying header fields of the request being cancelled. */
2358 SIVAL(substate->smb2.hdr, SMB2_HDR_FLAGS, flags);
2359 SIVAL(substate->smb2.hdr, SMB2_HDR_PID, pid);
2360 SIVAL(substate->smb2.hdr, SMB2_HDR_TID, tid);
2361 SBVAL(substate->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
2362 SBVAL(substate->smb2.hdr, SMB2_HDR_ASYNC_ID, aid);
2364 status = smb2cli_req_compound_submit(&subreq, 1);
2365 if (!NT_STATUS_IS_OK(status)) {
2366 TALLOC_FREE(subreq);
2370 tevent_req_set_callback(subreq, smb2cli_req_cancel_done, NULL);
/* Completion handler for the fire-and-forget CANCEL subrequest. */
2375 static void smb2cli_req_cancel_done(struct tevent_req *subreq)
2377 /* we do not care about the result */
2378 TALLOC_FREE(subreq);
/*
 * Create (but do not submit) an SMB2 request: allocates the tevent_req
 * and state, fills in the fixed 64-byte SMB2 header (magic, opcode,
 * flags, pid/tid/session id), records the fixed and dynamic body
 * buffers, and arms the optional timeout.  Submission happens later
 * via smb2cli_req_compound_submit().
 *
 * Returns NULL on allocation failure.
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
2381 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
2382 struct tevent_context *ev,
2383 struct smbXcli_conn *conn,
2385 uint32_t additional_flags,
2386 uint32_t clear_flags,
2387 uint32_t timeout_msec,
2390 struct smbXcli_session *session,
2391 const uint8_t *fixed,
2396 struct tevent_req *req;
2397 struct smbXcli_req_state *state;
2401 req = tevent_req_create(mem_ctx, &state,
2402 struct smbXcli_req_state);
2409 state->session = session;
2412 uid = session->smb2.session_id;
2415 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
2416 if (state->smb2.recv_iov == NULL) {
/* Caller-requested flag adjustments applied over the defaults. */
2421 flags |= additional_flags;
2422 flags &= ~clear_flags;
2424 state->smb2.fixed = fixed;
2425 state->smb2.fixed_len = fixed_len;
2426 state->smb2.dyn = dyn;
2427 state->smb2.dyn_len = dyn_len;
2429 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2430 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2431 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
2432 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
2433 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
2434 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
2435 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
2438 case SMB2_OP_CANCEL:
/* CANCEL never gets a response; mark it so dispatch ignores it. */
2439 state->one_way = true;
2443 * If this is a dummy request, it will have
2444 * UINT64_MAX as message id.
2445 * If we send on break acknowledgement,
2446 * this gets overwritten later.
2448 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
2452 if (timeout_msec > 0) {
2453 struct timeval endtime;
2455 endtime = timeval_current_ofs_msec(timeout_msec);
2456 if (!tevent_req_set_endtime(req, ev, endtime)) {
/*
 * Ask to be notified (via the request callback) when the server sends
 * an async interim response (STATUS_PENDING) for this request.
 */
2464 void smb2cli_req_set_notify_async(struct tevent_req *req)
2466 struct smbXcli_req_state *state =
2467 tevent_req_data(req,
2468 struct smbXcli_req_state);
2470 state->smb2.notify_async = true;
2473 static void smb2cli_req_writev_done(struct tevent_req *subreq);
2474 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2475 TALLOC_CTX *tmp_mem,
/*
 * Submit one or more previously-created SMB2 requests as a single
 * (possibly compound) PDU.
 *
 * Per request: validates connection/protocol, computes the credit
 * charge (LARGE_MTU servers are charged per 64KiB of dynamic data),
 * checks and consumes credits, assigns message ids, appends
 * header/fixed/dyn (plus 8-byte alignment padding between compound
 * elements), signs the element when the session requires it, and marks
 * it pending.  Finally prefixes the NBT length header and hands the
 * whole iovec to writev on the connection's outgoing queue.
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
2478 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
2481 struct smbXcli_req_state *state;
2482 struct tevent_req *subreq;
2484 int i, num_iov, nbt_len;
2487 * 1 for the nbt length
2488 * per request: HDR, fixed, dyn, padding
2489 * -1 because the last one does not need padding
2492 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
2494 return NT_STATUS_NO_MEMORY;
2500 for (i=0; i<num_reqs; i++) {
2509 const DATA_BLOB *signing_key = NULL;
2511 if (!tevent_req_is_in_progress(reqs[i])) {
2512 return NT_STATUS_INTERNAL_ERROR;
2515 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2517 if (!smbXcli_conn_is_connected(state->conn)) {
2518 return NT_STATUS_CONNECTION_DISCONNECTED;
2521 if ((state->conn->protocol != PROTOCOL_NONE) &&
2522 (state->conn->protocol < PROTOCOL_SMB2_02)) {
2523 return NT_STATUS_REVISION_MISMATCH;
2526 opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
/* CANCEL keeps the original request's mid; skip credit accounting. */
2527 if (opcode == SMB2_OP_CANCEL) {
/* Guard against message-id wraparound. */
2531 avail = UINT64_MAX - state->conn->smb2.mid;
2533 return NT_STATUS_CONNECTION_ABORTED;
/* LARGE_MTU: one credit per started 64KiB of dynamic payload. */
2536 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2537 charge = (MAX(state->smb2.dyn_len, 1) - 1)/ 65536 + 1;
2542 charge = MAX(state->smb2.credit_charge, charge);
2544 avail = MIN(avail, state->conn->smb2.cur_credits);
2545 if (avail < charge) {
2546 return NT_STATUS_INTERNAL_ERROR;
/* Ask the server for enough credits to reach max_credits again. */
2550 if (state->conn->smb2.max_credits > state->conn->smb2.cur_credits) {
2551 credits = state->conn->smb2.max_credits -
2552 state->conn->smb2.cur_credits;
2554 if (state->conn->smb2.max_credits >= state->conn->smb2.cur_credits) {
2558 mid = state->conn->smb2.mid;
2559 state->conn->smb2.mid += charge;
2560 state->conn->smb2.cur_credits -= charge;
2562 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2563 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, charge);
2565 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, credits);
2566 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
/* Append header, fixed body and (optional) dynamic body. */
2570 iov[num_iov].iov_base = state->smb2.hdr;
2571 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2574 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2575 iov[num_iov].iov_len = state->smb2.fixed_len;
2578 if (state->smb2.dyn != NULL) {
2579 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2580 iov[num_iov].iov_len = state->smb2.dyn_len;
2584 reqlen = sizeof(state->smb2.hdr);
2585 reqlen += state->smb2.fixed_len;
2586 reqlen += state->smb2.dyn_len;
/* Compound elements (except the last) are 8-byte aligned and chained
 * via the NextCommand header field. */
2588 if (i < num_reqs-1) {
2589 if ((reqlen % 8) > 0) {
2590 uint8_t pad = 8 - (reqlen % 8);
2591 iov[num_iov].iov_base = state->smb2.pad;
2592 iov[num_iov].iov_len = pad;
2596 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
2600 if (state->session) {
2601 bool should_sign = state->session->smb2.should_sign;
2603 if (opcode == SMB2_OP_SESSSETUP &&
2604 state->session->smb2.signing_key.length != 0) {
2609 * We prefer the channel signing key if it is
2613 signing_key = &state->session->smb2.channel_signing_key;
2617 * If it is a channel binding, we already have the main
2618 * signing key and try that one.
2620 if (signing_key && signing_key->length == 0) {
2621 signing_key = &state->session->smb2.signing_key;
2625 * If we do not have any session key yet, we skip the
2626 * signing of SMB2_OP_SESSSETUP requests.
2628 if (signing_key && signing_key->length == 0) {
2636 status = smb2_signing_sign_pdu(*signing_key,
2637 state->session->conn->protocol,
2638 &iov[hdr_iov], num_iov - hdr_iov);
2639 if (!NT_STATUS_IS_OK(status)) {
2644 ret = smbXcli_req_set_pending(reqs[i]);
2646 return NT_STATUS_NO_MEMORY;
/* The first request's state carries the shared NBT length header. */
2650 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
2651 _smb_setlen_tcp(state->length_hdr, nbt_len);
2652 iov[0].iov_base = state->length_hdr;
2653 iov[0].iov_len = sizeof(state->length_hdr);
2655 if (state->conn->dispatch_incoming == NULL) {
2656 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2659 subreq = writev_send(state, state->ev, state->conn->outgoing,
2660 state->conn->write_fd, false, iov, num_iov);
2661 if (subreq == NULL) {
2662 return NT_STATUS_NO_MEMORY;
2664 tevent_req_set_callback(subreq, smb2cli_req_writev_done, reqs[0]);
2665 return NT_STATUS_OK;
/*
 * Set an explicit credit charge for a not-yet-submitted request;
 * compound_submit() uses the maximum of this and the computed charge.
 */
2668 void smb2cli_req_set_credit_charge(struct tevent_req *req, uint16_t charge)
2670 struct smbXcli_req_state *state =
2671 tevent_req_data(req,
2672 struct smbXcli_req_state);
2674 state->smb2.credit_charge = charge;
/*
 * Convenience wrapper: create a single SMB2 request and submit it
 * immediately.  Errors from creation or submission are delivered
 * through the returned tevent_req (posted, never raised inline).
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
2677 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2678 struct tevent_context *ev,
2679 struct smbXcli_conn *conn,
2681 uint32_t additional_flags,
2682 uint32_t clear_flags,
2683 uint32_t timeout_msec,
2686 struct smbXcli_session *session,
2687 const uint8_t *fixed,
2692 struct tevent_req *req;
2695 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2696 additional_flags, clear_flags,
2699 fixed, fixed_len, dyn, dyn_len);
/* req_create may already have failed (e.g. endtime); post then. */
2703 if (!tevent_req_is_in_progress(req)) {
2704 return tevent_req_post(req, ev);
2706 status = smb2cli_req_compound_submit(&req, 1);
2707 if (tevent_req_nterror(req, status)) {
2708 return tevent_req_post(req, ev);
/*
 * writev completion for an SMB2 submit: on write failure the whole
 * connection is disconnected, which fails every pending request.
 */
2713 static void smb2cli_req_writev_done(struct tevent_req *subreq)
2715 struct tevent_req *req =
2716 tevent_req_callback_data(subreq,
2718 struct smbXcli_req_state *state =
2719 tevent_req_data(req,
2720 struct smbXcli_req_state);
2724 nwritten = writev_recv(subreq, &err);
2725 TALLOC_FREE(subreq);
2726 if (nwritten == -1) {
2727 /* here, we need to notify all pending requests */
2728 NTSTATUS status = map_nt_error_from_unix_common(err);
2729 smbXcli_conn_disconnect(state->conn, status);
/*
 * Parse a received SMB2 (possibly compound) PDU into iovec triples:
 * [64-byte header][fixed body][remainder] per compound element,
 * returned via *piov/*pnum_iov.  Walks the NextCommand chain,
 * validating magic, header length and body sizes; malformed input
 * yields NT_STATUS_INVALID_NETWORK_RESPONSE.
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
2734 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2735 struct iovec **piov, int *pnum_iov)
2745 iov = talloc_array(mem_ctx, struct iovec, num_iov);
2747 return NT_STATUS_NO_MEMORY;
2750 buflen = smb_len_tcp(buf);
2752 first_hdr = buf + NBT_HDR_SIZE;
2754 while (taken < buflen) {
2755 size_t len = buflen - taken;
2756 uint8_t *hdr = first_hdr + taken;
2759 size_t next_command_ofs;
2761 struct iovec *iov_tmp;
2764 * We need the header plus the body length field
2767 if (len < SMB2_HDR_BODY + 2) {
2768 DEBUG(10, ("%d bytes left, expected at least %d\n",
2769 (int)len, SMB2_HDR_BODY));
2772 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2773 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
2777 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2778 DEBUG(10, ("Got HDR len %d, expected %d\n",
2779 SVAL(hdr, 4), SMB2_HDR_BODY));
2784 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2785 body_size = SVAL(hdr, SMB2_HDR_BODY);
/* A non-zero NextCommand bounds this element's full size. */
2787 if (next_command_ofs != 0) {
2788 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2791 if (next_command_ofs > full_size) {
2794 full_size = next_command_ofs;
2796 if (body_size < 2) {
/* Low bit of StructureSize flags a trailing dynamic part; mask it. */
2799 body_size &= 0xfffe;
2801 if (body_size > (full_size - SMB2_HDR_BODY)) {
2805 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2807 if (iov_tmp == NULL) {
2809 return NT_STATUS_NO_MEMORY;
2812 cur = &iov[num_iov];
/* cur[0]=header, cur[1]=fixed body, cur[2]=dynamic remainder. */
2815 cur[0].iov_base = hdr;
2816 cur[0].iov_len = SMB2_HDR_BODY;
2817 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2818 cur[1].iov_len = body_size;
2819 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2820 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2826 *pnum_iov = num_iov;
2827 return NT_STATUS_OK;
/* Shared error exit for malformed PDUs. */
2831 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Look up the pending request whose SMB2 message id matches `mid`.
 * Linear scan over conn->pending; the not-found return path is outside
 * this extract.
 */
2834 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2837 size_t num_pending = talloc_array_length(conn->pending);
2840 for (i=0; i<num_pending; i++) {
2841 struct tevent_req *req = conn->pending[i];
2842 struct smbXcli_req_state *state =
2843 tevent_req_data(req,
2844 struct smbXcli_req_state);
2846 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
/*
 * Dispatch an incoming (possibly compound) SMB2 PDU.
 *
 * For each compound element: account the granted credits, find the
 * pending request by message id, validate opcode and REDIRECT flag,
 * handle async interim responses (STATUS_PENDING), resolve the
 * session/signing key (chained elements reuse the previous element's
 * session), verify the signature — with documented exceptions where
 * servers legitimately send unsigned error responses — and complete
 * the request, deferring callbacks when more work remains.
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
2853 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2854 TALLOC_CTX *tmp_mem,
2857 struct tevent_req *req;
2858 struct smbXcli_req_state *state = NULL;
2863 struct smbXcli_session *last_session = NULL;
2865 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2867 if (!NT_STATUS_IS_OK(status)) {
/* One iteration per compound element (header/body/dyn triple). */
2871 for (i=0; i<num_iov; i+=3) {
2872 uint8_t *inbuf_ref = NULL;
2873 struct iovec *cur = &iov[i];
2874 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2875 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2876 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2877 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2878 uint16_t req_opcode;
2880 uint16_t credits = SVAL(inhdr, SMB2_HDR_CREDIT);
2881 uint32_t new_credits;
2882 struct smbXcli_session *session = NULL;
2883 const DATA_BLOB *signing_key = NULL;
2884 bool should_sign = false;
/* Credits granted by the server; reject overflow past UINT16_MAX. */
2886 new_credits = conn->smb2.cur_credits;
2887 new_credits += credits;
2888 if (new_credits > UINT16_MAX) {
2889 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2891 conn->smb2.cur_credits += credits;
2893 req = smb2cli_conn_find_pending(conn, mid);
2895 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2897 state = tevent_req_data(req, struct smbXcli_req_state);
2899 state->smb2.got_async = false;
/* The response opcode must match what we sent. */
2901 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2902 if (opcode != req_opcode) {
2903 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2905 req_flags = SVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2907 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2908 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2911 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
/* Async interim response: remember the async id for cancel, and
 * optionally notify the caller; the request stays pending. */
2912 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2913 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2914 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2917 * async interim responses are not signed,
2918 * even if the SMB2_HDR_FLAG_SIGNED flag
2921 req_flags |= SMB2_HDR_FLAG_ASYNC;
2922 SBVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2923 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
2925 if (state->smb2.notify_async) {
2926 state->smb2.got_async = true;
2927 tevent_req_defer_callback(req, state->ev);
2928 tevent_req_notify_callback(req);
2933 session = state->session;
/* Chained compound elements inherit the previous element's session. */
2934 if (req_flags & SMB2_HDR_FLAG_CHAINED) {
2935 session = last_session;
2937 last_session = session;
2940 should_sign = session->smb2.should_sign;
2941 if (opcode == SMB2_OP_SESSSETUP &&
2942 session->smb2.signing_key.length != 0) {
2948 if (!(flags & SMB2_HDR_FLAG_SIGNED)) {
2949 return NT_STATUS_ACCESS_DENIED;
/* Signed response: locate the session by the header session id if
 * we don't already have one. */
2953 if (flags & SMB2_HDR_FLAG_SIGNED) {
2954 uint64_t uid = BVAL(inhdr, SMB2_HDR_SESSION_ID);
2956 if (session == NULL) {
2957 struct smbXcli_session *s;
2959 s = state->conn->sessions;
2960 for (; s; s = s->next) {
2961 if (s->smb2.session_id != uid) {
2970 if (session == NULL) {
2971 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2974 last_session = session;
2975 signing_key = &session->smb2.channel_signing_key;
2978 if (opcode == SMB2_OP_SESSSETUP) {
2980 * We prefer the channel signing key, if it is
2983 * If we do not have a channel signing key yet,
2984 * we try the main signing key, if it is not
2985 * the final response.
2987 if (signing_key && signing_key->length == 0 &&
2988 !NT_STATUS_IS_OK(status)) {
2989 signing_key = &session->smb2.signing_key;
2992 if (signing_key && signing_key->length == 0) {
2994 * If we do not have a session key to
2995 * verify the signature, we defer the
2996 * signing check to the caller.
2998 * The caller gets NT_STATUS_OK, it
3000 * smb2cli_session_set_session_key()
3002 * smb2cli_session_set_channel_key()
3003 * which will check the signature
3004 * with the channel signing key.
3010 if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) {
3012 * if the server returns NT_STATUS_USER_SESSION_DELETED
3013 * the response is not signed and we should
3014 * propagate the NT_STATUS_USER_SESSION_DELETED
3015 * status to the caller.
3020 if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_NAME_DELETED) ||
3021 NT_STATUS_EQUAL(status, NT_STATUS_FILE_CLOSED) ||
3022 NT_STATUS_EQUAL(status, NT_STATUS_INVALID_PARAMETER)) {
3024 * if the server returns
3025 * NT_STATUS_NETWORK_NAME_DELETED
3026 * NT_STATUS_FILE_CLOSED
3027 * NT_STATUS_INVALID_PARAMETER
3028 * the response might not be signed
3029 * as this happens before the signing checks.
3031 * If server echos the signature (or all zeros)
3032 * we should report the status from the server
3038 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
3039 state->smb2.hdr+SMB2_HDR_SIGNATURE,
3042 state->smb2.signing_skipped = true;
3048 static const uint8_t zeros[16];
3050 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
3054 state->smb2.signing_skipped = true;
3061 status = smb2_signing_check_pdu(*signing_key,
3062 state->conn->protocol,
3064 if (!NT_STATUS_IS_OK(status)) {
3066 * If the signing check fails, we disconnect
3073 smbXcli_req_unset_pending(req);
3076 * There might be more than one response
3077 * we need to defer the notifications
3079 if ((num_iov == 4) && (talloc_array_length(conn->pending) == 0)) {
3084 tevent_req_defer_callback(req, state->ev);
3088 * Note: here we use talloc_reference() in a way
3089 * that does not expose it to the caller.
3091 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
3092 if (tevent_req_nomem(inbuf_ref, req)) {
3096 /* copy the related buffers */
3097 state->smb2.recv_iov[0] = cur[0];
3098 state->smb2.recv_iov[1] = cur[1];
3099 state->smb2.recv_iov[2] = cur[2];
3101 tevent_req_done(req);
3105 return NT_STATUS_RETRY;
3108 return NT_STATUS_OK;
/*
 * Receive side of an SMB2 request: validate the response against the
 * caller's expected (status, body_size) table and hand over recv_iov.
 *
 * Returns STATUS_PENDING if only an async interim response arrived.
 * When the signature check was skipped at dispatch time (see the
 * unsigned-error-response exceptions in dispatch_incoming), a
 * non-error status with expectations is rejected as ACCESS_DENIED.
 *
 * NOTE(review): interior lines are missing from this extract; code
 * lines are kept verbatim.
 */
3111 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
3112 struct iovec **piov,
3113 const struct smb2cli_req_expected_response *expected,
3114 size_t num_expected)
3116 struct smbXcli_req_state *state =
3117 tevent_req_data(req,
3118 struct smbXcli_req_state);
3121 bool found_status = false;
3122 bool found_size = false;
3129 if (state->smb2.got_async) {
3130 return STATUS_PENDING;
/* Transport-level error: acceptable only if listed in `expected`. */
3133 if (tevent_req_is_nterror(req, &status)) {
3134 for (i=0; i < num_expected; i++) {
3135 if (NT_STATUS_EQUAL(status, expected[i].status)) {
3136 found_status = true;
3142 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
3148 if (num_expected == 0) {
3149 found_status = true;
/* Pull status and StructureSize out of the received header/body. */
3153 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
3154 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
3156 for (i=0; i < num_expected; i++) {
3157 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
3161 found_status = true;
/* body_size == 0 in the expectation means "any body size". */
3162 if (expected[i].body_size == 0) {
3167 if (expected[i].body_size == body_size) {
3173 if (!found_status) {
/* A skipped signing check may not be trusted for success statuses. */
3177 if (state->smb2.signing_skipped) {
3178 if (num_expected > 0) {
3179 return NT_STATUS_ACCESS_DENIED;
3181 if (!NT_STATUS_IS_ERR(status)) {
3182 return NT_STATUS_ACCESS_DENIED;
3187 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3191 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);
/*
 * SMB1 negprot dialect table: maps each internal protocol level to the
 * dialect string sent in the NEGOTIATE request.  Multiple strings may
 * map to one level; order matters for the negprot offer list.
 */
3197 static const struct {
3198 enum protocol_types proto;
3199 const char *smb1_name;
3200 } smb1cli_prots[] = {
3201 {PROTOCOL_CORE, "PC NETWORK PROGRAM 1.0"},
3202 {PROTOCOL_COREPLUS, "MICROSOFT NETWORKS 1.03"},
3203 {PROTOCOL_LANMAN1, "MICROSOFT NETWORKS 3.0"},
3204 {PROTOCOL_LANMAN1, "LANMAN1.0"},
3205 {PROTOCOL_LANMAN2, "LM1.2X002"},
3206 {PROTOCOL_LANMAN2, "DOS LANMAN2.1"},
3207 {PROTOCOL_LANMAN2, "LANMAN2.1"},
3208 {PROTOCOL_LANMAN2, "Samba"},
3209 {PROTOCOL_NT1, "NT LANMAN 1.0"},
3210 {PROTOCOL_NT1, "NT LM 0.12"},
/* "SMB 2.???" requests SMB2 wildcard dialect negotiation. */
3211 {PROTOCOL_SMB2_02, "SMB 2.002"},
3212 {PROTOCOL_SMB2_10, "SMB 2.???"},
/*
 * SMB2/3 dialect table: maps protocol levels to the 16-bit dialect
 * revision codes used in the SMB2 NEGOTIATE request/response.
 */
3215 static const struct {
3216 enum protocol_types proto;
3217 uint16_t smb2_dialect;
3218 } smb2cli_prots[] = {
3219 {PROTOCOL_SMB2_02, SMB2_DIALECT_REVISION_202},
3220 {PROTOCOL_SMB2_10, SMB2_DIALECT_REVISION_210},
3221 {PROTOCOL_SMB2_22, SMB2_DIALECT_REVISION_222},
3222 {PROTOCOL_SMB2_24, SMB2_DIALECT_REVISION_224},
3223 {PROTOCOL_SMB3_00, SMB3_DIALECT_REVISION_300},
/*
 * Per-request state for smbXcli_negprot_send().
 */
3226 struct smbXcli_negprot_state {
3227 struct smbXcli_conn *conn;
3228 struct tevent_context *ev;
3229 uint32_t timeout_msec;
/* protocol range the caller will accept */
3230 enum protocol_types min_protocol;
3231 enum protocol_types max_protocol;
/*
 * Dynamic part of the SMB2 NEGOTIATE request: two bytes per dialect.
 * NOTE(review): this appears to live in an smb2 sub-struct (used as
 * state->smb2.fixed / state->smb2.dyn below) — confirm in full source.
 */
3235 uint8_t dyn[ARRAY_SIZE(smb2cli_prots)*2];
3239 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq);
3240 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state);
3241 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq);
3242 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state);
3243 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq);
3244 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
/*
 * Start protocol negotiation on a connection.
 *
 * Depending on the requested [min_protocol, max_protocol] range this
 * sends:
 *  - a pure SMB1 negprot (both bounds below SMB2),
 *  - a pure SMB2 negprot (both bounds at/above SMB2), or
 *  - an SMB1 negprot carrying the SMB2 dialect strings, where the
 *    response may be either SMB1 or SMB2 and
 *    smbXcli_negprot_dispatch_incoming() re-routes the callback to
 *    match the protocol of the reply.
 *
 * Fails with NT_STATUS_INVALID_PARAMETER_MIX when either bound is
 * PROTOCOL_NONE or min > max.
 */
3248 struct tevent_req *smbXcli_negprot_send(TALLOC_CTX *mem_ctx,
3249 struct tevent_context *ev,
3250 struct smbXcli_conn *conn,
3251 uint32_t timeout_msec,
3252 enum protocol_types min_protocol,
3253 enum protocol_types max_protocol)
3255 struct tevent_req *req, *subreq;
3256 struct smbXcli_negprot_state *state;
3258 req = tevent_req_create(mem_ctx, &state,
3259 struct smbXcli_negprot_state);
3265 state->timeout_msec = timeout_msec;
3266 state->min_protocol = min_protocol;
3267 state->max_protocol = max_protocol;
3269 if (min_protocol == PROTOCOL_NONE) {
3270 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
3271 return tevent_req_post(req, ev);
3274 if (max_protocol == PROTOCOL_NONE) {
3275 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
3276 return tevent_req_post(req, ev);
3279 if (min_protocol > max_protocol) {
3280 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
3281 return tevent_req_post(req, ev);
/* SMB1-only range: plain SMB1 negprot */
3284 if ((min_protocol < PROTOCOL_SMB2_02) &&
3285 (max_protocol < PROTOCOL_SMB2_02)) {
3289 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3291 subreq = smbXcli_negprot_smb1_subreq(state);
3292 if (tevent_req_nomem(subreq, req)) {
3293 return tevent_req_post(req, ev);
3295 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
/* SMB2-only range: plain SMB2 negprot */
3299 if ((min_protocol >= PROTOCOL_SMB2_02) &&
3300 (max_protocol >= PROTOCOL_SMB2_02)) {
3304 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3306 subreq = smbXcli_negprot_smb2_subreq(state);
3307 if (tevent_req_nomem(subreq, req)) {
3308 return tevent_req_post(req, ev);
3310 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
/*
 * Mixed range:
3315 * We send an SMB1 negprot with the SMB2 dialects
3316 * and expect a SMB1 or a SMB2 response.
3318 * smbXcli_negprot_dispatch_incoming() will fix the
3319 * callback to match protocol of the response.
 */
3321 conn->dispatch_incoming = smbXcli_negprot_dispatch_incoming;
3323 subreq = smbXcli_negprot_smb1_subreq(state);
3324 if (tevent_req_nomem(subreq, req)) {
3325 return tevent_req_post(req, ev);
3327 tevent_req_set_callback(subreq, smbXcli_negprot_invalid_done, req);
/*
 * Callback for the mixed SMB1/SMB2 negprot case.  A valid response is
 * re-routed by smbXcli_negprot_dispatch_incoming() before this fires,
 * so being called here means no valid negprot reply was dispatched:
 * propagate the low-level error, or report an internal error if the
 * subreq unexpectedly "succeeded".
 */
3331 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq)
3333 struct tevent_req *req =
3334 tevent_req_callback_data(subreq,
3339 * we just want the low level error
3341 status = tevent_req_simple_recv_ntstatus(subreq);
3342 TALLOC_FREE(subreq);
3343 if (tevent_req_nterror(req, status)) {
3347 /* this should never happen */
3348 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
/*
 * Build and send the SMB1 SMBnegprot request: a byte buffer containing,
 * for each dialect within [min_protocol, max_protocol], a dialect-marker
 * byte followed by the NUL-terminated dialect string.
 * Returns the smb1cli_req subreq, or NULL on allocation failure.
 */
3351 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state)
3354 DATA_BLOB bytes = data_blob_null;
3358 /* setup the protocol strings */
3359 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
3363 if (smb1cli_prots[i].proto < state->min_protocol) {
3367 if (smb1cli_prots[i].proto > state->max_protocol) {
/* NOTE(review): 'c' is declared in elided lines — presumably the
 * SMB1 dialect-marker byte 0x02; confirm in full source. */
3371 ok = data_blob_append(state, &bytes, &c, sizeof(c));
3377 * We know it is already ASCII and
3378 * we want NULL termination.
3380 ok = data_blob_append(state, &bytes,
3381 smb1cli_prots[i].smb1_name,
3382 strlen(smb1cli_prots[i].smb1_name)+1);
3388 smb1cli_req_flags(state->max_protocol,
3389 state->conn->smb1.client.capabilities,
3394 return smb1cli_req_send(state, state->ev, state->conn,
3398 state->timeout_msec,
3399 0xFFFE, 0, 0, /* pid, tid, uid */
3400 0, NULL, /* wct, vwv */
3401 bytes.length, bytes.data);
/*
 * Parse the SMB1 negprot response.
 *
 * The server replies with an index into the dialect list we sent; this
 * is mapped back through smb1cli_prots[] to a protocol level.  The
 * response body is then parsed according to that level (NT1, LANMAN, or
 * core), the negotiated capabilities/limits are computed, and the
 * results are stored on conn->smb1.*.
 */
3404 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq)
3406 struct tevent_req *req =
3407 tevent_req_callback_data(subreq,
3409 struct smbXcli_negprot_state *state =
3410 tevent_req_data(req,
3411 struct smbXcli_negprot_state);
3412 struct smbXcli_conn *conn = state->conn;
3413 struct iovec *recv_iov = NULL;
3422 size_t num_prots = 0;
3424 uint32_t client_capabilities = conn->smb1.client.capabilities;
3425 uint32_t both_capabilities;
3426 uint32_t server_capabilities = 0;
3427 uint32_t capabilities;
3428 uint32_t client_max_xmit = conn->smb1.client.max_xmit;
3429 uint32_t server_max_xmit = 0;
3431 uint32_t server_max_mux = 0;
3432 uint16_t server_security_mode = 0;
3433 uint32_t server_session_key = 0;
3434 bool server_readbraw = false;
3435 bool server_writebraw = false;
3436 bool server_lockread = false;
3437 bool server_writeunlock = false;
3438 struct GUID server_guid = GUID_zero();
3439 DATA_BLOB server_gss_blob = data_blob_null;
3440 uint8_t server_challenge[8];
3441 char *server_workgroup = NULL;
3442 char *server_name = NULL;
3443 int server_time_zone = 0;
3444 NTTIME server_system_time = 0;
/* acceptable word counts for the three response shapes */
3445 static const struct smb1cli_req_expected_response expected[] = {
3447 .status = NT_STATUS_OK,
3448 .wct = 0x11, /* NT1 */
3451 .status = NT_STATUS_OK,
3452 .wct = 0x0D, /* LM */
3455 .status = NT_STATUS_OK,
3456 .wct = 0x01, /* CORE */
3460 ZERO_STRUCT(server_challenge);
3462 status = smb1cli_req_recv(subreq, state,
3467 NULL, /* pvwv_offset */
3470 NULL, /* pbytes_offset */
3472 expected, ARRAY_SIZE(expected));
3473 TALLOC_FREE(subreq);
3474 if (tevent_req_nterror(req, status)) {
3478 flags = CVAL(inhdr, HDR_FLG);
/* the server's answer: index into the dialect list we offered */
3480 protnum = SVAL(vwv, 0);
3482 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
3483 if (smb1cli_prots[i].proto < state->min_protocol) {
3487 if (smb1cli_prots[i].proto > state->max_protocol) {
3491 if (protnum != num_prots) {
3496 conn->protocol = smb1cli_prots[i].proto;
3500 if (conn->protocol == PROTOCOL_NONE) {
3501 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* pre-NT1 dialects have no signing support at all */
3505 if ((conn->protocol < PROTOCOL_NT1) && conn->mandatory_signing) {
3506 DEBUG(0,("smbXcli_negprot: SMB signing is mandatory "
3507 "and the selected protocol level doesn't support it.\n"));
3508 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
3512 if (flags & FLAG_SUPPORT_LOCKREAD) {
3513 server_lockread = true;
3514 server_writeunlock = true;
/* NT1 response: 0x11 words */
3517 if (conn->protocol >= PROTOCOL_NT1) {
3518 const char *client_signing = NULL;
3519 bool server_mandatory = false;
3520 bool server_allowed = false;
3521 const char *server_signing = NULL;
3526 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3531 server_security_mode = CVAL(vwv + 1, 0);
3532 server_max_mux = SVAL(vwv + 1, 1);
3533 server_max_xmit = IVAL(vwv + 3, 1);
3534 server_session_key = IVAL(vwv + 7, 1);
3535 server_time_zone = SVALS(vwv + 15, 1);
3536 server_time_zone *= 60;
3537 /* this time arrives in real GMT */
3538 server_system_time = BVAL(vwv + 11, 1);
3539 server_capabilities = IVAL(vwv + 9, 1);
3541 key_len = CVAL(vwv + 16, 1);
3543 if (server_capabilities & CAP_RAW_MODE) {
3544 server_readbraw = true;
3545 server_writebraw = true;
3547 if (server_capabilities & CAP_LOCK_AND_READ) {
3548 server_lockread = true;
/* extended security: bytes are 16-byte server GUID + GSS blob */
3551 if (server_capabilities & CAP_EXTENDED_SECURITY) {
3552 DATA_BLOB blob1, blob2;
3554 if (num_bytes < 16) {
3555 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3559 blob1 = data_blob_const(bytes, 16);
3560 status = GUID_from_data_blob(&blob1, &server_guid);
3561 if (tevent_req_nterror(req, status)) {
3565 blob1 = data_blob_const(bytes+16, num_bytes-16);
3566 blob2 = data_blob_dup_talloc(state, blob1);
3567 if (blob1.length > 0 &&
3568 tevent_req_nomem(blob2.data, req)) {
3571 server_gss_blob = blob2;
/* non-extended security: optional 8-byte challenge + workgroup/name */
3573 DATA_BLOB blob1, blob2;
3575 if (num_bytes < key_len) {
3576 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3580 if (key_len != 0 && key_len != 8) {
3581 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3586 memcpy(server_challenge, bytes, 8);
3589 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3590 blob2 = data_blob_const(bytes+key_len, num_bytes-key_len);
3591 if (blob1.length > 0) {
3594 len = utf16_len_n(blob1.data,
/* presumably converts the UTF-16 workgroup name; elided args */
3598 ok = convert_string_talloc(state,
3606 status = map_nt_error_from_unix_common(errno);
3607 tevent_req_nterror(req, status);
3612 blob2.data += blob1.length;
3613 blob2.length -= blob1.length;
3614 if (blob2.length > 0) {
/* NOTE(review): this reads blob1 inside the blob2 branch —
 * verify against the full source that this is intentional */
3617 len = utf16_len_n(blob1.data,
3621 ok = convert_string_talloc(state,
3629 status = map_nt_error_from_unix_common(errno);
3630 tevent_req_nterror(req, status);
/* negotiate SMB1 signing between client policy and server flags */
3636 client_signing = "disabled";
3637 if (conn->allow_signing) {
3638 client_signing = "allowed";
3640 if (conn->mandatory_signing) {
3641 client_signing = "required";
3644 server_signing = "not supported";
3645 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLED) {
3646 server_signing = "supported";
3647 server_allowed = true;
3649 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRED) {
3650 server_signing = "required";
3651 server_mandatory = true;
3654 ok = smb_signing_set_negotiated(conn->smb1.signing,
3658 DEBUG(1,("cli_negprot: SMB signing is required, "
3659 "but client[%s] and server[%s] mismatch\n",
3660 client_signing, server_signing));
3661 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
/* LANMAN response: 0x0D words */
3665 } else if (conn->protocol >= PROTOCOL_LANMAN1) {
3671 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3675 server_security_mode = SVAL(vwv + 1, 0);
3676 server_max_xmit = SVAL(vwv + 2, 0);
3677 server_max_mux = SVAL(vwv + 3, 0);
3678 server_readbraw = ((SVAL(vwv + 5, 0) & 0x1) != 0);
3679 server_writebraw = ((SVAL(vwv + 5, 0) & 0x2) != 0);
3680 server_session_key = IVAL(vwv + 6, 0);
3681 server_time_zone = SVALS(vwv + 10, 0);
3682 server_time_zone *= 60;
3683 /* this time is converted to GMT by make_unix_date */
3684 t = pull_dos_date((const uint8_t *)(vwv + 8), server_time_zone);
3685 unix_to_nt_time(&server_system_time, t);
3686 key_len = SVAL(vwv + 11, 0);
3688 if (num_bytes < key_len) {
3689 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3693 if (key_len != 0 && key_len != 8) {
3694 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3699 memcpy(server_challenge, bytes, 8);
3702 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3703 if (blob1.length > 0) {
3707 len = utf16_len_n(blob1.data,
3711 ok = convert_string_talloc(state,
3719 status = map_nt_error_from_unix_common(errno);
3720 tevent_req_nterror(req, status);
3726 /* the old core protocol */
3727 server_time_zone = get_time_zone(time(NULL));
3728 server_max_xmit = 1024;
/* sanity-check the server-provided limits */
3732 if (server_max_xmit < 1024) {
3733 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3737 if (server_max_mux < 1) {
3738 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/*
3743 * Now calculate the negotiated capabilities
3744 * based on the mask for:
3745 * - client only flags
3746 * - flags used in both directions
3747 * - server only flags
 */
3749 both_capabilities = client_capabilities & server_capabilities;
3750 capabilities = client_capabilities & SMB_CAP_CLIENT_MASK;
3751 capabilities |= both_capabilities & SMB_CAP_BOTH_MASK;
3752 capabilities |= server_capabilities & SMB_CAP_SERVER_MASK;
3754 max_xmit = MIN(client_max_xmit, server_max_xmit);
/* persist the negotiated state on the connection */
3756 conn->smb1.server.capabilities = server_capabilities;
3757 conn->smb1.capabilities = capabilities;
3759 conn->smb1.server.max_xmit = server_max_xmit;
3760 conn->smb1.max_xmit = max_xmit;
3762 conn->smb1.server.max_mux = server_max_mux;
3764 conn->smb1.server.security_mode = server_security_mode;
3766 conn->smb1.server.readbraw = server_readbraw;
3767 conn->smb1.server.writebraw = server_writebraw;
3768 conn->smb1.server.lockread = server_lockread;
3769 conn->smb1.server.writeunlock = server_writeunlock;
3771 conn->smb1.server.session_key = server_session_key;
3773 talloc_steal(conn, server_gss_blob.data);
3774 conn->smb1.server.gss_blob = server_gss_blob;
3775 conn->smb1.server.guid = server_guid;
3776 memcpy(conn->smb1.server.challenge, server_challenge, 8);
3777 conn->smb1.server.workgroup = talloc_move(conn, &server_workgroup);
3778 conn->smb1.server.name = talloc_move(conn, &server_name);
3780 conn->smb1.server.time_zone = server_time_zone;
3781 conn->smb1.server.system_time = server_system_time;
3783 tevent_req_done(req);
/*
 * Build and send the SMB2 NEGOTIATE request.
 *
 * The dynamic buffer gets one 16-bit dialect code per protocol in
 * [min_protocol, max_protocol]; the fixed body carries DialectCount,
 * SecurityMode, Capabilities (only sent for SMB2_22+), ClientGuid
 * (only for SMB2_10+, zeroed otherwise) and ClientStartTime.
 * Returns the smb2cli_req subreq, or NULL on failure.
 */
3786 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state)
3790 uint16_t dialect_count = 0;
3792 buf = state->smb2.dyn;
3793 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3794 if (smb2cli_prots[i].proto < state->min_protocol) {
3798 if (smb2cli_prots[i].proto > state->max_protocol) {
3802 SSVAL(buf, dialect_count*2, smb2cli_prots[i].smb2_dialect);
3806 buf = state->smb2.fixed;
3808 SSVAL(buf, 2, dialect_count);
3809 SSVAL(buf, 4, state->conn->smb2.client.security_mode);
3810 SSVAL(buf, 6, 0); /* Reserved */
3811 if (state->max_protocol >= PROTOCOL_SMB2_22) {
3812 SIVAL(buf, 8, state->conn->smb2.client.capabilities);
3814 SIVAL(buf, 8, 0); /* Capabilities */
3816 if (state->max_protocol >= PROTOCOL_SMB2_10) {
3820 status = GUID_to_ndr_blob(&state->conn->smb2.client.guid,
3822 if (!NT_STATUS_IS_OK(status)) {
3825 memcpy(buf+12, blob.data, 16); /* ClientGuid */
3827 memset(buf+12, 0, 16); /* ClientGuid */
3829 SBVAL(buf, 28, 0); /* ClientStartTime */
3831 return smb2cli_req_send(state, state->ev,
3832 state->conn, SMB2_OP_NEGPROT,
3834 state->timeout_msec,
3835 0xFEFF, 0, NULL, /* pid, tid, session */
3836 state->smb2.fixed, sizeof(state->smb2.fixed),
3837 state->smb2.dyn, dialect_count*2);
/*
 * Parse the SMB2 NEGOTIATE response.
 *
 * Maps the server's DialectRevision back to a protocol level and stores
 * the server's security mode, GUID, capabilities, transfer limits,
 * times and GSS security blob on conn->smb2.server.  If the server
 * answered an SMB1 multi-protocol negprot with the 0x02FF wildcard
 * revision, a real SMB2 NEGOTIATE is re-sent (min_protocol is raised to
 * SMB2_02 so this cannot loop forever).
 */
3840 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq)
3842 struct tevent_req *req =
3843 tevent_req_callback_data(subreq,
3845 struct smbXcli_negprot_state *state =
3846 tevent_req_data(req,
3847 struct smbXcli_negprot_state);
3848 struct smbXcli_conn *conn = state->conn;
3849 size_t security_offset, security_length;
3855 uint16_t dialect_revision;
3856 static const struct smb2cli_req_expected_response expected[] = {
3858 .status = NT_STATUS_OK,
3863 status = smb2cli_req_recv(subreq, state, &iov,
3864 expected, ARRAY_SIZE(expected));
3865 TALLOC_FREE(subreq);
3866 if (tevent_req_nterror(req, status)) {
3870 body = (uint8_t *)iov[1].iov_base;
3872 dialect_revision = SVAL(body, 4);
3874 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3875 if (smb2cli_prots[i].proto < state->min_protocol) {
3879 if (smb2cli_prots[i].proto > state->max_protocol) {
3883 if (smb2cli_prots[i].smb2_dialect != dialect_revision) {
3887 conn->protocol = smb2cli_prots[i].proto;
3891 if (conn->protocol == PROTOCOL_NONE) {
3892 if (state->min_protocol >= PROTOCOL_SMB2_02) {
3893 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* 0x02FF means "SMB2 supported, ask again with a real SMB2 negprot" */
3897 if (dialect_revision != SMB2_DIALECT_REVISION_2FF) {
3898 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3902 /* make sure we do not loop forever */
3903 state->min_protocol = PROTOCOL_SMB2_02;
/*
3906 * send a SMB2 negprot, in order to negotiate
 * the real dialect
 */
3909 subreq = smbXcli_negprot_smb2_subreq(state);
3910 if (tevent_req_nomem(subreq, req)) {
3913 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
/* persist the server's negotiate parameters */
3917 conn->smb2.server.security_mode = SVAL(body, 2);
3919 blob = data_blob_const(body + 8, 16);
3920 status = GUID_from_data_blob(&blob, &conn->smb2.server.guid);
3921 if (tevent_req_nterror(req, status)) {
3925 conn->smb2.server.capabilities = IVAL(body, 24);
3926 conn->smb2.server.max_trans_size= IVAL(body, 28);
3927 conn->smb2.server.max_read_size = IVAL(body, 32);
3928 conn->smb2.server.max_write_size= IVAL(body, 36);
3929 conn->smb2.server.system_time = BVAL(body, 40);
3930 conn->smb2.server.start_time = BVAL(body, 48);
/* the security buffer must directly follow the fixed body */
3932 security_offset = SVAL(body, 56);
3933 security_length = SVAL(body, 58);
3935 if (security_offset != SMB2_HDR_BODY + iov[1].iov_len) {
3936 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3940 if (security_length > iov[2].iov_len) {
3941 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3945 conn->smb2.server.gss_blob = data_blob_talloc(conn,
3948 if (tevent_req_nomem(conn->smb2.server.gss_blob.data, req)) {
3952 tevent_req_done(req);
/*
 * Dispatch function installed during a mixed SMB1/SMB2 negprot.
 *
 * Looks at the protocol magic of the incoming PDU, repoints the single
 * pending request's callback to the matching *_done handler, installs
 * the matching permanent dispatch function on the connection, and then
 * forwards the PDU to it.  Exactly one request must be pending;
 * anything that is neither SMB1 nor SMB2 disconnects with
 * NT_STATUS_INVALID_NETWORK_RESPONSE.
 */
3955 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
3956 TALLOC_CTX *tmp_mem,
3959 size_t num_pending = talloc_array_length(conn->pending);
3960 struct tevent_req *subreq;
3961 struct smbXcli_req_state *substate;
3962 struct tevent_req *req;
3963 uint32_t protocol_magic = IVAL(inbuf, 4);
3965 if (num_pending != 1) {
3966 return NT_STATUS_INTERNAL_ERROR;
3969 subreq = conn->pending[0];
3970 substate = tevent_req_data(subreq, struct smbXcli_req_state);
3971 req = tevent_req_callback_data(subreq, struct tevent_req);
3973 switch (protocol_magic) {
3975 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
3976 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3977 return smb1cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
3980 if (substate->smb2.recv_iov == NULL) {
/*
3982 * For the SMB1 negprot we have move it.
 * (the request was sent as SMB1, so its recv_iov lives on the
 * smb1 side of the state; move it over for SMB2 processing)
 */
3984 substate->smb2.recv_iov = substate->smb1.recv_iov;
3985 substate->smb1.recv_iov = NULL;
/*
3989 * we got an SMB2 answer, which consumed sequence number 0
3990 * so we need to use 1 as the next one
 */
3993 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3994 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3995 return smb2cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
3998 DEBUG(10, ("Got non-SMB PDU\n"));
3999 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Collect the result of smbXcli_negprot_send().
 * Returns NT_STATUS_OK on success or the negotiation error.
 */
4002 NTSTATUS smbXcli_negprot_recv(struct tevent_req *req)
4004 return tevent_req_simple_recv_ntstatus(req);
/*
 * Synchronous wrapper around smbXcli_negprot_send()/_recv().
 *
 * Creates a private tevent context, drives the async request to
 * completion and returns its status.  Refuses to run
 * (NT_STATUS_INVALID_PARAMETER_MIX) while async calls are already in
 * flight on the connection, since that would corrupt the event loop.
 */
4007 NTSTATUS smbXcli_negprot(struct smbXcli_conn *conn,
4008 uint32_t timeout_msec,
4009 enum protocol_types min_protocol,
4010 enum protocol_types max_protocol)
4012 TALLOC_CTX *frame = talloc_stackframe();
4013 struct tevent_context *ev;
4014 struct tevent_req *req;
4015 NTSTATUS status = NT_STATUS_NO_MEMORY;
4018 if (smbXcli_conn_has_async_calls(conn)) {
/*
4020 * Can't use sync call while an async call is in flight
 */
4022 status = NT_STATUS_INVALID_PARAMETER_MIX;
4025 ev = tevent_context_init(frame);
4029 req = smbXcli_negprot_send(frame, ev, conn, timeout_msec,
4030 min_protocol, max_protocol);
4034 ok = tevent_req_poll(req, ev);
4036 status = map_nt_error_from_unix_common(errno);
4039 status = smbXcli_negprot_recv(req);
/*
 * talloc destructor for a session: unlink it from its connection's
 * session list (if it is still attached to one).
 */
4045 static int smbXcli_session_destructor(struct smbXcli_session *session)
4047 if (session->conn == NULL) {
4051 DLIST_REMOVE(session->conn->sessions, session);
/*
 * Allocate a new zero-initialized session, attach it to the end of the
 * connection's session list and arm the destructor that unlinks it
 * again.  Returns NULL on allocation failure.
 */
4055 struct smbXcli_session *smbXcli_session_create(TALLOC_CTX *mem_ctx,
4056 struct smbXcli_conn *conn)
4058 struct smbXcli_session *session;
4060 session = talloc_zero(mem_ctx, struct smbXcli_session);
4061 if (session == NULL) {
4064 talloc_set_destructor(session, smbXcli_session_destructor);
4066 DLIST_ADD_END(conn->sessions, session, struct smbXcli_session *);
4067 session->conn = conn;
/*
 * Return the SMB2 SecurityMode this client advertises for session
 * setup: signing enabled, plus signing required if the connection
 * mandates it.  Returns 0 when the session has no connection (the
 * elided guard presumably checks conn == NULL — confirm in full
 * source).
 */
4072 uint8_t smb2cli_session_security_mode(struct smbXcli_session *session)
4074 struct smbXcli_conn *conn = session->conn;
4075 uint8_t security_mode = 0;
4078 return security_mode;
4081 security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
4082 if (conn->mandatory_signing) {
4083 security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
4086 return security_mode;
/* Return the current SMB2 session id. */
4089 uint64_t smb2cli_session_current_id(struct smbXcli_session *session)
4091 return session->smb2.session_id;
/* Return the SMB2 session flags received at session setup. */
4094 uint16_t smb2cli_session_get_flags(struct smbXcli_session *session)
4096 return session->smb2.session_flags;
/*
 * Hand out a copy of the session's application key (the key derived for
 * use by upper layers such as DCERPC).
 *
 * Returns NT_STATUS_NO_USER_SESSION_KEY if no key has been established,
 * NT_STATUS_NO_MEMORY if the copy fails; on success *key is a duplicate
 * owned by mem_ctx.
 */
4099 NTSTATUS smb2cli_session_application_key(struct smbXcli_session *session,
4100 TALLOC_CTX *mem_ctx,
4103 *key = data_blob_null;
4105 if (session->smb2.application_key.length == 0) {
4106 return NT_STATUS_NO_USER_SESSION_KEY;
4109 *key = data_blob_dup_talloc(mem_ctx, session->smb2.application_key);
4110 if (key->data == NULL) {
4111 return NT_STATUS_NO_MEMORY;
4114 return NT_STATUS_OK;
/*
 * Store the session id and flags returned by the server's session
 * setup response.
 */
4117 void smb2cli_session_set_id_and_flags(struct smbXcli_session *session,
4118 uint64_t session_id,
4119 uint16_t session_flags)
4121 session->smb2.session_id = session_id;
4122 session->smb2.session_flags = session_flags;
/*
 * Establish the SMB2 session keys after a successful session setup.
 *
 * _session_key: the raw session key from the auth exchange; it is
 *               zero-padded/truncated to 16 bytes
 * recv_iov:     the final (signed) session setup response, used to
 *               verify the server's signature with the derived key
 *
 * Derives the signing key, the application key and the channel signing
 * key.  For SMB2_24+ the keys are run through SP800-108 style key
 * derivation (smb2_key_derivation) with per-purpose labels; for older
 * dialects the raw key is used directly.  Guest/anonymous sessions get
 * should_sign = false and no keys.  Fails with
 * NT_STATUS_INVALID_PARAMETER_MIX if keys were already set (and,
 * presumably, in elided preconditions at the top — confirm in full
 * source).
 */
4125 NTSTATUS smb2cli_session_set_session_key(struct smbXcli_session *session,
4126 const DATA_BLOB _session_key,
4127 const struct iovec *recv_iov)
4129 struct smbXcli_conn *conn = session->conn;
4130 uint16_t no_sign_flags;
4131 uint8_t session_key[16];
4135 return NT_STATUS_INVALID_PARAMETER_MIX;
/* guest and anonymous sessions never sign */
4138 no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
4140 if (session->smb2.session_flags & no_sign_flags) {
4141 session->smb2.should_sign = false;
4142 return NT_STATUS_OK;
4145 if (session->smb2.signing_key.length != 0) {
4146 return NT_STATUS_INVALID_PARAMETER_MIX;
/* normalize the key material to exactly 16 bytes */
4149 ZERO_STRUCT(session_key);
4150 memcpy(session_key, _session_key.data,
4151 MIN(_session_key.length, sizeof(session_key)));
4153 session->smb2.signing_key = data_blob_talloc(session,
4155 sizeof(session_key));
4156 if (session->smb2.signing_key.data == NULL) {
/* scrub key material before returning */
4157 ZERO_STRUCT(session_key);
4158 return NT_STATUS_NO_MEMORY;
/* SMB2_24+: derive the signing key with label/context "SMB2AESCMAC"/"SmbSign" */
4161 if (conn->protocol >= PROTOCOL_SMB2_24) {
4162 const DATA_BLOB label = data_blob_string_const_null("SMB2AESCMAC");
4163 const DATA_BLOB context = data_blob_string_const_null("SmbSign");
4165 smb2_key_derivation(session_key, sizeof(session_key),
4166 label.data, label.length,
4167 context.data, context.length,
4168 session->smb2.signing_key.data);
4171 session->smb2.application_key = data_blob_dup_talloc(session,
4172 session->smb2.signing_key);
4173 if (session->smb2.application_key.data == NULL) {
4174 ZERO_STRUCT(session_key);
4175 return NT_STATUS_NO_MEMORY;
/* SMB2_24+: derive the application key with "SMB2APP"/"SmbRpc" */
4178 if (conn->protocol >= PROTOCOL_SMB2_24) {
4179 const DATA_BLOB label = data_blob_string_const_null("SMB2APP");
4180 const DATA_BLOB context = data_blob_string_const_null("SmbRpc");
4182 smb2_key_derivation(session_key, sizeof(session_key),
4183 label.data, label.length,
4184 context.data, context.length,
4185 session->smb2.application_key.data);
4187 ZERO_STRUCT(session_key);
/* the first channel's signing key is the session signing key */
4189 session->smb2.channel_signing_key = data_blob_dup_talloc(session,
4190 session->smb2.signing_key);
4191 if (session->smb2.channel_signing_key.data == NULL) {
4192 return NT_STATUS_NO_MEMORY;
/* verify the server's signature on the session setup response */
4195 status = smb2_signing_check_pdu(session->smb2.channel_signing_key,
4196 session->conn->protocol,
4198 if (!NT_STATUS_IS_OK(status)) {
/* decide whether we sign from client desire and server requirement */
4202 session->smb2.should_sign = false;
4204 if (conn->desire_signing) {
4205 session->smb2.should_sign = true;
4208 if (conn->smb2.server.security_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
4209 session->smb2.should_sign = true;
4212 return NT_STATUS_OK;
/*
 * Create a second channel object for an existing session (SMB3
 * multi-channel): the new session shares session1's id, flags, signing
 * key and should_sign setting, but is bound to 'conn'.
 *
 * Requires session1 to already have a signing key; fails with
 * NT_STATUS_INVALID_PARAMETER_MIX otherwise (a second elided
 * precondition sits above — confirm in full source).
 */
4215 NTSTATUS smb2cli_session_create_channel(TALLOC_CTX *mem_ctx,
4216 struct smbXcli_session *session1,
4217 struct smbXcli_conn *conn,
4218 struct smbXcli_session **_session2)
4220 struct smbXcli_session *session2;
4222 if (session1->smb2.signing_key.length == 0) {
4223 return NT_STATUS_INVALID_PARAMETER_MIX;
4227 return NT_STATUS_INVALID_PARAMETER_MIX;
4230 session2 = talloc_zero(mem_ctx, struct smbXcli_session);
4231 if (session2 == NULL) {
4232 return NT_STATUS_NO_MEMORY;
4234 session2->smb2.session_id = session1->smb2.session_id;
4235 session2->smb2.session_flags = session1->smb2.session_flags;
4237 session2->smb2.signing_key = data_blob_dup_talloc(session2,
4238 session1->smb2.signing_key);
4239 if (session2->smb2.signing_key.data == NULL) {
4240 return NT_STATUS_NO_MEMORY;
4243 session2->smb2.should_sign = session1->smb2.should_sign;
/* attach the channel to the new connection's session list */
4245 talloc_set_destructor(session2, smbXcli_session_destructor);
4246 DLIST_ADD_END(conn->sessions, session2, struct smbXcli_session *);
4247 session2->conn = conn;
4249 *_session2 = session2;
4250 return NT_STATUS_OK;
/*
 * Establish the channel signing key for an additional channel of a
 * session (SMB3 multi-channel session binding).
 *
 * _channel_key: raw key from the channel's auth exchange, zero-padded/
 *               truncated to 16 bytes
 * recv_iov:     the signed session setup response for this channel,
 *               verified with the derived key
 *
 * For SMB2_24+ the key is derived via smb2_key_derivation() with
 * "SMB2AESCMAC"/"SmbSign"; older dialects use the raw key.  Fails with
 * NT_STATUS_INVALID_PARAMETER_MIX if the channel key is already set
 * (and in an elided precondition above — confirm in full source).
 */
4253 NTSTATUS smb2cli_session_set_channel_key(struct smbXcli_session *session,
4254 const DATA_BLOB _channel_key,
4255 const struct iovec *recv_iov)
4257 struct smbXcli_conn *conn = session->conn;
4258 uint8_t channel_key[16];
4262 return NT_STATUS_INVALID_PARAMETER_MIX;
4265 if (session->smb2.channel_signing_key.length != 0) {
4266 return NT_STATUS_INVALID_PARAMETER_MIX;
/* normalize the key material to exactly 16 bytes */
4269 ZERO_STRUCT(channel_key);
4270 memcpy(channel_key, _channel_key.data,
4271 MIN(_channel_key.length, sizeof(channel_key)));
4273 session->smb2.channel_signing_key = data_blob_talloc(session,
4275 sizeof(channel_key));
4276 if (session->smb2.channel_signing_key.data == NULL) {
/* scrub key material before returning */
4277 ZERO_STRUCT(channel_key);
4278 return NT_STATUS_NO_MEMORY;
4281 if (conn->protocol >= PROTOCOL_SMB2_24) {
4282 const DATA_BLOB label = data_blob_string_const_null("SMB2AESCMAC");
4283 const DATA_BLOB context = data_blob_string_const_null("SmbSign");
4285 smb2_key_derivation(channel_key, sizeof(channel_key),
4286 label.data, label.length,
4287 context.data, context.length,
4288 session->smb2.channel_signing_key.data);
4290 ZERO_STRUCT(channel_key);
/* verify the server's signature on this channel's setup response */
4292 status = smb2_signing_check_pdu(session->smb2.channel_signing_key,
4293 session->conn->protocol,
4295 if (!NT_STATUS_IS_OK(status)) {
4299 return NT_STATUS_OK;