2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "smbd/globals.h"
23 #include "../libcli/smb/smb_common.h"
24 #include "librpc/gen_ndr/messaging.h"
/*
 * NOTE(review): this listing carries baked-in original line numbers and
 * many elided lines; the members of this struct are not visible here.
 * Usage below (offset/length/flags per element) suggests it holds one
 * decoded SMB2_LOCK_ELEMENT from the wire -- confirm in full source.
 */
26 struct smbd_smb2_lock_element {
/*
 * Async worker pair: _send() validates and performs/queues the lock
 * operation, _recv() collects the final NTSTATUS.
 */
32 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
33 struct tevent_context *ev,
34 struct smbd_smb2_request *smb2req,
36 uint64_t in_file_id_volatile,
37 uint16_t in_lock_count,
38 struct smbd_smb2_lock_element *in_locks);
39 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
/* Completion callback that builds and sends the SMB2 LOCK response. */
41 static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
/*
 * Parse and validate an incoming SMB2 LOCK request, decode its lock
 * elements, and hand them to the async worker smbd_smb2_lock_send().
 * Returns via smbd_smb2_request_error() on any validation failure,
 * otherwise queues the request as pending.
 *
 * NOTE(review): lines are elided in this listing (declarations of
 * inhdr/body_size/in_smbpid/l and several closing braces are missing).
 */
42 NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
45 const uint8_t *inbody;
46 const int i = req->current_idx;
/* Fixed body size of an SMB2 LOCK request (0x30 per the SMB2 spec). */
47 size_t expected_body_size = 0x30;
50 uint16_t in_lock_count;
51 uint64_t in_file_id_persistent;
52 uint64_t in_file_id_volatile;
53 struct smbd_smb2_lock_element *in_locks;
54 struct tevent_req *subreq;
55 const uint8_t *lock_buffer;
58 inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
/* Low bit of the declared body size flags a dynamic part, so the
 * fixed-body iovec is compared with that bit masked off. */
59 if (req->in.vector[i+1].iov_len != (expected_body_size & 0xFFFFFFFE)) {
60 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
63 inbody = (const uint8_t *)req->in.vector[i+1].iov_base;
65 body_size = SVAL(inbody, 0x00);
66 if (body_size != expected_body_size) {
67 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
70 in_smbpid = IVAL(inhdr, SMB2_HDR_PID);
72 in_lock_count = CVAL(inbody, 0x02);
73 /* 0x04 - 4 bytes reserved */
74 in_file_id_persistent = BVAL(inbody, 0x08);
75 in_file_id_volatile = BVAL(inbody, 0x10);
/* At least one lock element is required. */
77 if (in_lock_count < 1) {
78 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/* The first element is embedded in the fixed body; the remaining
 * (in_lock_count - 1) elements of 0x18 bytes each must fit inside
 * the dynamic buffer. */
81 if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
82 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
85 if (req->compat_chain_fsp) {
/* Without a chained fsp, a nonzero persistent id cannot match. */
87 } else if (in_file_id_persistent != 0) {
88 return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
91 in_locks = talloc_array(req, struct smbd_smb2_lock_element,
93 if (in_locks == NULL) {
94 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
/* First lock element lives at offset 0x18 of the fixed body. */
98 lock_buffer = inbody + 0x18;
100 in_locks[l].offset = BVAL(lock_buffer, 0x00);
101 in_locks[l].length = BVAL(lock_buffer, 0x08);
102 in_locks[l].flags = IVAL(lock_buffer, 0x10);
103 /* 0x14 - 4 reserved bytes */
/* Remaining elements come from the dynamic buffer. */
105 lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;
107 for (l=1; l < in_lock_count; l++) {
108 in_locks[l].offset = BVAL(lock_buffer, 0x00);
109 in_locks[l].length = BVAL(lock_buffer, 0x08);
110 in_locks[l].flags = IVAL(lock_buffer, 0x10);
111 /* 0x14 - 4 reserved bytes */
/* Kick off the async lock work; the elided argument lines presumably
 * pass the file ids, count and element array -- confirm in full source. */
116 subreq = smbd_smb2_lock_send(req,
117 req->sconn->smb2.event_ctx,
123 if (subreq == NULL) {
124 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
126 tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
/* May go async; the response is sent from smbd_smb2_request_lock_done(). */
128 return smbd_smb2_request_pending_queue(req, subreq);
/*
 * Completion callback for smbd_smb2_lock_send(): collects the result
 * and sends either an error response (including NT_STATUS_CANCELLED
 * for cancelled requests) or the 4-byte SMB2 LOCK success body.
 * Any transport-level send failure terminates the connection.
 *
 * NOTE(review): several lines (declarations of status/outbody, error
 * strings passed to smbd_server_connection_terminate, closing braces)
 * are elided in this listing.
 */
131 static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
133 struct smbd_smb2_request *req = tevent_req_callback_data(subreq,
134 struct smbd_smb2_request);
137 NTSTATUS error; /* transport error */
/* A cancelled lock request is answered with NT_STATUS_CANCELLED. */
139 if (req->cancelled) {
140 const uint8_t *inhdr = (const uint8_t *)
141 req->in.vector[req->current_idx].iov_base;
142 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
144 DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
145 (unsigned long long)mid ));
146 error = smbd_smb2_request_error(req, NT_STATUS_CANCELLED);
147 if (!NT_STATUS_IS_OK(error)) {
148 smbd_server_connection_terminate(req->sconn,
/* Normal completion path: fetch the lock result. */
155 status = smbd_smb2_lock_recv(subreq);
157 if (!NT_STATUS_IS_OK(status)) {
158 error = smbd_smb2_request_error(req, status);
159 if (!NT_STATUS_IS_OK(error)) {
160 smbd_server_connection_terminate(req->sconn,
/* Build the fixed 4-byte SMB2 LOCK response body. */
167 outbody = data_blob_talloc(req->out.vector, NULL, 0x04);
168 if (outbody.data == NULL) {
169 error = smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
170 if (!NT_STATUS_IS_OK(error)) {
171 smbd_server_connection_terminate(req->sconn,
178 SSVAL(outbody.data, 0x00, 0x04); /* struct size */
179 SSVAL(outbody.data, 0x02, 0); /* reserved */
181 error = smbd_smb2_request_done(req, outbody, NULL);
182 if (!NT_STATUS_IS_OK(error)) {
183 smbd_server_connection_terminate(req->sconn,
/*
 * Per-request state for an in-flight SMB2 lock operation.
 * NOTE(review): some members (e.g. the lock_count referenced later)
 * are elided from this listing.
 */
189 struct smbd_smb2_lock_state {
190 struct smbd_smb2_request *smb2req;
/* Fake SMB1 request (smbd_smb2_fake_smb_request) driving the common
 * locking code paths. */
191 struct smb_request *smb1req;
/* Blocking-lock record; presumably non-NULL only while this request
 * is queued waiting for a contended range -- confirm in full source. */
192 struct blocking_lock_record *blr;
/* Array of decoded lock elements in smbd_lock_element form. */
194 struct smbd_lock_element *locks;
/*
 * Validate the decoded SMB2 lock elements against the target fsp,
 * translate them to smbd_lock_element form and call the common
 * smbd_do_locking() backend (separate calls for the unlock and lock
 * cases).  Returns a tevent_req that is already completed unless the
 * backend went async on a blocked lock.
 *
 * NOTE(review): this listing elides many lines (parameter lines,
 * closing braces, the smbd_do_locking() argument lists, checks such
 * as fsp == NULL after file_fsp(), and the flag-validation bodies
 * inside the switch) -- consult the full source before changing.
 */
197 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
198 struct tevent_context *ev,
199 struct smbd_smb2_request *smb2req,
201 uint64_t in_file_id_volatile,
202 uint16_t in_lock_count,
203 struct smbd_smb2_lock_element *in_locks)
205 struct tevent_req *req;
206 struct smbd_smb2_lock_state *state;
207 struct smb_request *smb1req;
208 connection_struct *conn = smb2req->tcon->compat_conn;
/* -1 == wait forever for blocking locks. */
210 int32_t timeout = -1;
211 bool isunlock = false;
213 struct smbd_lock_element *locks;
217 req = tevent_req_create(mem_ctx, &state,
218 struct smbd_smb2_lock_state);
222 state->smb2req = smb2req;
/* Build a fake SMB1 request so the shared locking code can be used. */
223 smb1req = smbd_smb2_fake_smb_request(smb2req);
224 if (tevent_req_nomem(smb1req, req)) {
225 return tevent_req_post(req, ev);
227 state->smb1req = smb1req;
229 DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
230 (unsigned long long)in_file_id_volatile));
/* Resolve the volatile file id to an open file. */
232 fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
234 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
235 return tevent_req_post(req, ev);
/* The handle must belong to this tree connect ... */
237 if (conn != fsp->conn) {
238 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
239 return tevent_req_post(req, ev);
/* ... and to this session. */
241 if (smb2req->session->vuid != fsp->vuid) {
242 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
243 return tevent_req_post(req, ev);
246 locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
248 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
249 return tevent_req_post(req, ev);
/* The first element's flags determine the mode of the whole batch. */
252 switch (in_locks[0].flags) {
253 case SMB2_LOCK_FLAG_SHARED:
254 case SMB2_LOCK_FLAG_EXCLUSIVE:
/* A blocking (non-FAIL_IMMEDIATELY) request may carry only one
 * element. */
255 if (in_lock_count > 1) {
256 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
257 return tevent_req_post(req, ev);
262 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
263 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
267 case SMB2_LOCK_FLAG_UNLOCK:
268 /* only the first lock gives the UNLOCK bit - see
275 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
276 return tevent_req_post(req, ev);
/* Validate and translate each element. */
279 for (i=0; i<in_lock_count; i++) {
280 bool invalid = false;
282 switch (in_locks[i].flags) {
283 case SMB2_LOCK_FLAG_SHARED:
284 case SMB2_LOCK_FLAG_EXCLUSIVE:
286 tevent_req_nterror(req,
287 NT_STATUS_INVALID_PARAMETER);
288 return tevent_req_post(req, ev);
291 tevent_req_nterror(req,
292 NT_STATUS_INVALID_PARAMETER);
293 return tevent_req_post(req, ev);
297 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
298 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
300 tevent_req_nterror(req,
301 NT_STATUS_INVALID_PARAMETER);
302 return tevent_req_post(req, ev);
306 case SMB2_LOCK_FLAG_UNLOCK:
308 tevent_req_nterror(req,
309 NT_STATUS_INVALID_PARAMETER);
310 return tevent_req_post(req, ev);
317 * if the first element was an UNLOCK
318 * we need to defer the error response
319 * to the backend, because we need to process
320 * all unlock elements before
325 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
326 return tevent_req_post(req, ev);
329 locks[i].smbpid = in_smbpid;
330 locks[i].offset = in_locks[i].offset;
331 locks[i].count = in_locks[i].length;
332 
333 if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
334 locks[i].brltype = WRITE_LOCK;
335 } else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
336 locks[i].brltype = READ_LOCK;
337 } else if (invalid) {
339 * this is an invalid UNLOCK element
340 * and the backend needs to test for
341 * brltype != UNLOCK_LOCK and return
342 * NT_STATUS_INVALID_PARAMETER
344 locks[i].brltype = READ_LOCK;
346 locks[i].brltype = UNLOCK_LOCK;
350 state->locks = locks;
351 state->lock_count = in_lock_count;
/* Two backend calls: one variant for unlock, one for lock (the
 * argument lists are elided in this listing). */
354 status = smbd_do_locking(smb1req, fsp,
363 status = smbd_do_locking(smb1req, fsp,
372 if (!NT_STATUS_IS_OK(status)) {
/* Map the POSIX-style conflict status to the SMB2 one. */
373 if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
374 status = NT_STATUS_LOCK_NOT_GRANTED;
376 tevent_req_nterror(req, status);
377 return tevent_req_post(req, ev);
384 tevent_req_done(req);
385 return tevent_req_post(req, ev);
/*
 * Collect the result of smbd_smb2_lock_send(): returns the stored
 * error if any, and releases the tevent_req in both paths.
 * NOTE(review): the declaration of `status` and the return statements
 * are elided in this listing.
 */
388 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
392 if (tevent_req_is_nterror(req, &status)) {
393 tevent_req_received(req);
397 tevent_req_received(req);
401 /****************************************************************
402 Cancel an outstanding blocking lock request.
403 *****************************************************************/
/*
 * tevent cancel hook installed by push_blocking_lock_request_smb2().
 * Marks the owning smb2 request as cancelled and completes the
 * subreq; smbd_smb2_request_lock_done() then replies with
 * NT_STATUS_CANCELLED.  NOTE(review): the lock-db cleanup lines
 * between marking cancelled and tevent_req_done() are elided here.
 */
405 static bool smbd_smb2_lock_cancel(struct tevent_req *req)
407 struct smbd_smb2_request *smb2req = NULL;
408 struct smbd_smb2_lock_state *state = tevent_req_data(req,
409 struct smbd_smb2_lock_state);
/* Defensive: no owning request recorded -- nothing to cancel. */
414 if (!state->smb2req) {
418 smb2req = state->smb2req;
419 smb2req->cancelled = true;
421 tevent_req_done(req);
425 /****************************************************************
426 Got a message saying someone unlocked a file. Re-schedule all
427 blocking lock requests as we don't know if anything overlapped.
428 *****************************************************************/
/*
 * MSG_SMB_UNLOCK handler (registered in
 * push_blocking_lock_request_smb2): simply re-runs the pending
 * SMB2 blocking-lock queue.
 */
430 static void received_unlock_msg(struct messaging_context *msg,
433 struct server_id server_id,
436 DEBUG(10,("received_unlock_msg (SMB2)\n"));
437 process_blocking_lock_queue_smb2();
440 /****************************************************************
441 Function to get the blr on a pending record.
442 *****************************************************************/
/*
 * Return the blocking_lock_record of an smb2 request if (and only if)
 * it is a still-in-progress SMB2 LOCK; otherwise NULL.
 * NOTE(review): the early "return NULL" lines and the final return of
 * state->blr are elided in this listing.
 */
444 struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
446 struct smbd_smb2_lock_state *state = NULL;
447 const uint8_t *inhdr;
/* No subreq -> already processed. */
452 if (smb2req->subreq == NULL) {
455 if (!tevent_req_is_in_progress(smb2req->subreq)) {
/* Only LOCK opcodes carry a blocking lock record. */
458 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
459 if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
462 state = tevent_req_data(smb2req->subreq,
463 struct smbd_smb2_lock_state);
469 /****************************************************************
470 Set up the next brl timeout.
471 *****************************************************************/
/*
 * Walk all pending SMB2 requests, find the earliest blocking-lock
 * expiry and (re)arm sconn->smb2.locks.brl_timeout accordingly.
 * Returns false on failure to add the timed event (the success return
 * and timeout handler argument lines are elided in this listing).
 */
473 static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
475 struct smbd_smb2_request *smb2req;
476 struct timeval next_timeout;
/* Tunable upper bound between pending-lock rechecks (seconds). */
477 int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);
/* Drop any previously armed timeout before recalculating. */
479 TALLOC_FREE(sconn->smb2.locks.brl_timeout);
481 next_timeout = timeval_zero();
483 for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
484 struct blocking_lock_record *blr =
485 get_pending_smb2req_blr(smb2req);
489 if (timeval_is_zero(&blr->expire_time)) {
491 * If we're blocked on pid 0xFFFFFFFF this is
492 * a POSIX lock, so calculate a timeout of
493 * 10 seconds into the future.
495 if (blr->blocking_pid == 0xFFFFFFFF) {
496 struct timeval psx_to = timeval_current_ofs(10, 0);
497 next_timeout = timeval_brl_min(&next_timeout, &psx_to);
503 next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
/* No expiring blocked locks at all -> nothing to arm. */
506 if (timeval_is_zero(&next_timeout)) {
507 DEBUG(10, ("recalc_smb2_brl_timeout:Next "
508 "timeout = Infinite.\n"));
513 * To account for unclean shutdowns by clients we need a
514 * maximum timeout that we use for checking pending locks. If
515 * we have any pending locks at all, then check if the pending
516 * lock can continue at least every brl:recalctime seconds
517 * (default 5 seconds).
519 * This saves us needing to do a message_send_all() in the
520 * SIGCHLD handler in the parent daemon. That
521 * message_send_all() caused O(n^2) work to be done when IP
522 * failovers happened in clustered Samba, which could make the
523 * entire system unusable for many minutes.
526 if (max_brl_timeout > 0) {
527 struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
528 next_timeout = timeval_brl_min(&next_timeout, &min_to);
/* Debug-only block reporting the relative delay. */
532 struct timeval cur, from_now;
534 cur = timeval_current();
535 from_now = timeval_until(&cur, &next_timeout);
536 DEBUG(10, ("recalc_smb2_brl_timeout: Next "
537 "timeout = %d.%d seconds from now.\n",
538 (int)from_now.tv_sec, (int)from_now.tv_usec));
541 sconn->smb2.locks.brl_timeout = event_add_timed(
542 smbd_event_context(),
547 if (!sconn->smb2.locks.brl_timeout) {
553 /****************************************************************
554 Get an SMB2 lock request to go async. lock_timeout should
556 *****************************************************************/
/*
 * Park a contended SMB2 lock as a blocking lock: record the pending
 * range in the brl database, remember the per-lock state in a
 * blocking_lock_record hung off the request state, rearm the brl
 * timeout, register for unlock notifications, and make the request
 * cancellable.  Returns bool (true on success); several lines --
 * parameter list entries, NULL checks, the count field, brl_lock()
 * arguments and the final return -- are elided in this listing.
 */
558 bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
559 struct smb_request *smb1req,
564 enum brl_type lock_type,
565 enum brl_flavour lock_flav,
568 uint32_t blocking_pid)
570 struct smbd_server_connection *sconn = smbd_server_conn;
571 struct smbd_smb2_request *smb2req = smb1req->smb2req;
572 struct tevent_req *req = NULL;
573 struct smbd_smb2_lock_state *state = NULL;
574 NTSTATUS status = NT_STATUS_OK;
579 req = smb2req->subreq;
583 state = tevent_req_data(req, struct smbd_smb2_lock_state);
/* NOTE(review): two talloc_zero calls for blr appear here; the
 * surrounding (elided) lines presumably distinguish a first-time
 * allocation from reuse -- confirm in full source. */
589 struct blocking_lock_record *blr = talloc_zero(state,
590 struct blocking_lock_record);
594 blr = talloc_zero(state, struct blocking_lock_record);
/* lock_timeout == -1 means block forever (zero expire_time). */
596 if (lock_timeout == -1) {
597 blr->expire_time.tv_sec = 0;
598 blr->expire_time.tv_usec = 0; /* Never expire. */
600 blr->expire_time = timeval_current_ofs(
/* lock_timeout is in milliseconds: split into secs + usecs. */
602 (lock_timeout % 1000) * 1000);
605 blr->lock_num = lock_num;
606 blr->lock_pid = lock_pid;
607 blr->blocking_pid = blocking_pid;
608 blr->lock_flav = lock_flav;
609 blr->lock_type = lock_type;
610 blr->offset = offset;
613 /* Specific brl_lock() implementations can fill this in. */
614 blr->blr_private = NULL;
616 /* Add a pending lock record for this. */
617 status = brl_lock(smbd_messaging_context(),
623 lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
629 if (!NT_STATUS_IS_OK(status)) {
630 DEBUG(0,("push_blocking_lock_request_smb2: "
631 "failed to add PENDING_LOCK record.\n"));
/* Rearm the wakeup timer now that a new pending lock exists. */
638 recalc_smb2_brl_timeout(sconn);
640 /* Ensure we'll receive messages when this is unlocked. */
641 if (!sconn->smb2.locks.blocking_lock_unlock_state) {
642 messaging_register(smbd_messaging_context(), NULL,
643 MSG_SMB_UNLOCK, received_unlock_msg);
644 sconn->smb2.locks.blocking_lock_unlock_state = true;
647 /* allow this request to be canceled */
648 tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);
653 /****************************************************************
654 Re-process a blocking lock request.
655 This is equivalent to process_lockingX() inside smbd/blocking.c
656 *****************************************************************/
/*
 * Retry a blocked SMB2 lock request: attempt the remaining locks in
 * order; complete the subreq on full success or on a hard error,
 * otherwise leave it queued to be retried again.
 * NOTE(review): lines fetching state->blr/fsp, the do_lock() argument
 * list, the br_lck release, and closing braces are elided here.
 */
658 static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req)
661 struct blocking_lock_record *blr = NULL;
662 struct smbd_smb2_lock_state *state = NULL;
663 files_struct *fsp = NULL;
/* Already answered -- nothing to do. */
665 if (!smb2req->subreq) {
668 state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
676 /* Try and finish off getting all the outstanding locks. */
678 for (; blr->lock_num < state->lock_count; blr->lock_num++) {
679 struct byte_range_lock *br_lck = NULL;
680 struct smbd_lock_element *e = &state->locks[blr->lock_num];
682 br_lck = do_lock(smbd_messaging_context(),
/* Stop at the first lock we still cannot acquire. */
696 if (NT_STATUS_IS_ERR(status)) {
701 if(blr->lock_num == state->lock_count) {
703 * Success - we got all the locks.
706 DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
707 "fnum=%d num_locks=%d\n",
710 (int)state->lock_count));
712 tevent_req_done(smb2req->subreq);
716 if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
717 !NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
719 * We have other than a "can't get lock"
720 * error. Return an error.
722 tevent_req_nterror(smb2req->subreq, status);
727 * Still can't get all the locks - keep waiting.
730 DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
731 "for file %s, fnum = %d. Waiting....\n",
733 (int)state->lock_count,
741 /****************************************************************
742 Attempt to process all outstanding blocking locks pending on
744 *****************************************************************/
/*
 * Walk every queued SMB2 request and retry the ones that are
 * in-progress LOCK calls, then rearm the brl timeout.  nextreq is
 * captured up front because reprocessing may complete (and unlink)
 * the current request.
 */
746 void process_blocking_lock_queue_smb2(void)
748 struct smbd_server_connection *sconn = smbd_server_conn;
749 struct smbd_smb2_request *smb2req, *nextreq;
751 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
752 const uint8_t *inhdr;
754 nextreq = smb2req->next;
756 if (smb2req->subreq == NULL) {
757 /* This message has been processed. */
760 if (!tevent_req_is_in_progress(smb2req->subreq)) {
761 /* This message has been processed. */
765 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
766 if (IVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
767 reprocess_blocked_smb2_lock(smb2req);
771 recalc_smb2_brl_timeout(sconn);
774 /****************************************************************************
775 Remove any locks on this fd. Called from file_close().
776 ****************************************************************************/
778 void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
779 struct byte_range_lock *br_lck)
781 struct smbd_server_connection *sconn = smbd_server_conn;
782 struct smbd_smb2_request *smb2req, *nextreq;
784 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
785 struct smbd_smb2_lock_state *state = NULL;
786 files_struct *fsp_curr = NULL;
787 int i = smb2req->current_idx;
788 uint64_t in_file_id_volatile;
789 struct blocking_lock_record *blr = NULL;
790 const uint8_t *inhdr;
791 const uint8_t *inbody;
793 nextreq = smb2req->next;
795 if (smb2req->subreq == NULL) {
796 /* This message has been processed. */
799 if (!tevent_req_is_in_progress(smb2req->subreq)) {
800 /* This message has been processed. */
804 inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
805 if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
806 /* Not a lock call. */
810 inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
811 in_file_id_volatile = BVAL(inbody, 0x10);
813 state = tevent_req_data(smb2req->subreq,
814 struct smbd_smb2_lock_state);
816 /* Strange - is this even possible ? */
820 fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
821 if (fsp_curr == NULL) {
822 /* Strange - is this even possible ? */
826 if (fsp_curr != fsp) {
827 /* It's not our fid */
833 /* Remove the entries from the lock db. */
834 brl_lock_cancel(br_lck,
842 /* Finally cancel the request. */
843 smb2req->cancelled = true;
844 tevent_req_cancel(smb2req->subreq);