2 Unix SMB/CIFS implementation.
3 Blocking Locking functions
4 Copyright (C) Jeremy Allison 1998-2003
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "smbd/smbd.h"
22 #include "smbd/globals.h"
24 #include "lib/util/tevent_ntstatus.h"
25 #include "lib/dbwrap/dbwrap_watch.h"
26 #include "librpc/gen_ndr/ndr_open_files.h"
29 #define DBGC_CLASS DBGC_LOCKING
/*
 * Try to acquire every element of @locks on @fsp in a single pass.
 *
 * On the first conflict, all locks already taken by this call are
 * undone in reverse order and the caller learns which element blocked
 * (*blocker_idx) and who owns the conflicting lock (*blocking_pid,
 * *blocking_smblctx).
 *
 * NOTE(review): several lines are elided in this view of the file;
 * comments describe only the code that is visible here.
 */
31 NTSTATUS smbd_do_locks_try(
32 	struct files_struct *fsp,
33 	enum brl_flavour lock_flav,
35 	struct smbd_lock_element *locks,
36 	uint16_t *blocker_idx,
37 	struct server_id *blocking_pid,
38 	uint64_t *blocking_smblctx)
40 	NTSTATUS status = NT_STATUS_OK;
/* Take each requested lock in order, stopping at the first failure. */
43 	for (i=0; i<num_locks; i++) {
44 		struct smbd_lock_element *e = &locks[i];
55 		if (!NT_STATUS_IS_OK(status)) {
60 	if (NT_STATUS_IS_OK(status)) {
67 	 * Undo the locks we successfully got
/*
 * i appears to be an unsigned 16-bit index: counting down until it
 * wraps to UINT16_MAX visits indices i-1 .. 0 exactly once.
 */
69 	for (i = i-1; i != UINT16_MAX; i--) {
70 		struct smbd_lock_element *e = &locks[i];
/*
 * Append @req to the fsp's array of pending SMB1 blocked-lock
 * requests (fsp->blocked_smb1_lock_reqs), growing the talloc array by
 * one slot. Returns false on allocation failure (per the bool return
 * type; the failure path is elided in this view).
 */
81 static bool smbd_smb1_fsp_add_blocked_lock_req(
82 	struct files_struct *fsp, struct tevent_req *req)
/* Current count is derived from the talloc array itself. */
84 	size_t num_reqs = talloc_array_length(fsp->blocked_smb1_lock_reqs);
85 	struct tevent_req **tmp = NULL;
89 		fsp->blocked_smb1_lock_reqs,
/* Install the grown array, then store the new request in the last slot. */
95 	fsp->blocked_smb1_lock_reqs = tmp;
96 	fsp->blocked_smb1_lock_reqs[num_reqs] = req;
/*
 * Per-request state for one SMB1 blocking-lock request while it waits
 * for a conflicting lock to be released.
 *
 * NOTE(review): some members are elided in this view (e.g. timeout,
 * num_locks, blocker are referenced by the functions below) — confirm
 * against the full file.
 */
100 struct smbd_smb1_do_locks_state {
101 	struct tevent_context *ev;
/* Original request; talloc_move()d in by smbd_smb1_do_locks_send()
 * and handed back via smbd_smb1_do_locks_extract_smbreq(). */
102 	struct smb_request *smbreq;
103 	struct files_struct *fsp;
/* Current poll interval for posix-lock polling mode; grown by
 * smbd_smb1_do_locks_update_polling_msecs(). */
105 	uint32_t polling_msecs;
/* Absolute deadline; zero means "not yet set up", see
 * smbd_smb1_do_locks_setup_timeout(). */
106 	struct timeval endtime;
107 	bool large_offset;	/* required for correct cancel */
108 	enum brl_flavour lock_flav;
110 	struct smbd_lock_element *locks;
/* Status reported to the client when the request finally times out. */
112 	NTSTATUS deny_status;
/* Forward declarations for the retry/cleanup machinery below. */
115 static void smbd_smb1_do_locks_try(struct tevent_req *req);
116 static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
117 static void smbd_smb1_blocked_locks_cleanup(
118 	struct tevent_req *req, enum tevent_req_state req_state);
/*
 * Compute state->endtime (the absolute deadline for this blocked lock
 * request) from the client-supplied timeout, applying several
 * Windows-compatibility and anti-thrash adjustments. Idempotent: an
 * already-set endtime is left alone.
 *
 * NOTE(review): lines are elided in this view; comments describe only
 * the visible code.
 */
120 static void smbd_smb1_do_locks_setup_timeout(
121 	struct smbd_smb1_do_locks_state *state,
122 	const struct smbd_lock_element *blocker)
124 	struct files_struct *fsp = state->fsp;
/* Already set up on a previous call — nothing to do. */
126 	if (!timeval_is_zero(&state->endtime)) {
/* A finite, non-infinite timeout is clamped up to the spin time. */
133 	if ((state->timeout != 0) && (state->timeout != UINT32_MAX)) {
135 		 * Windows internal resolution for blocking locks
136 		 * seems to be about 200ms... Don't wait for less than
139 		state->timeout = MAX(state->timeout, lp_lock_spin_time());
142 	if (state->timeout != 0) {
146 	if (blocker == NULL) {
/*
 * Special-cased offset range used by ancient clients; the
 * (offset >> 63) == 0 check excludes "negative" 64-bit offsets.
 */
150 	if ((blocker->offset >= 0xEF000000) &&
151 	    ((blocker->offset >> 63) == 0)) {
153 		 * This must be an optimization of an ancient
156 		state->timeout = lp_lock_spin_time();
/* Repeated attempt on the same previously-failed offset: back off. */
159 	if ((fsp->lock_failure_seen) &&
160 	    (blocker->offset == fsp->lock_failure_offset)) {
162 		 * Delay repeated lock attempts on the same
163 		 * lock. Maybe a more advanced version of the
166 		DBG_DEBUG("Delaying lock request due to previous "
168 		state->timeout = lp_lock_spin_time();
173 	 * Note state->timeout might still 0,
174 	 * but that's ok, as we don't want to retry
/* Deadline is relative to when the request arrived, not "now". */
177 	state->endtime = timeval_add(&state->smbreq->request_time,
178 				     state->timeout / 1000,
179 				     (state->timeout % 1000) * 1000);
/*
 * Grow the polling interval for posix-lock polling mode linearly by
 * v_min per call, capped at v_max = 10 * v_min. With the default
 * lp_lock_spin_time() of 200ms this steps 0.2s, 0.4s, ... up to 2.0s.
 */
182 static void smbd_smb1_do_locks_update_polling_msecs(
183 	struct smbd_smb1_do_locks_state *state)
186 	 * The default lp_lock_spin_time() is 200ms.
188 	 * v_min is in the range of 0.002 to 20 secs
189 	 * (0.2 secs by default)
191 	 * v_max is in the range of 0.02 to 200 secs
192 	 * (2.0 secs by default)
194 	 * The typical steps are:
195 	 * 0.2, 0.4, 0.6, 0.8, ... 2.0
/* Clamp the configured spin time into [2ms, 20000ms]. */
197 	uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
198 	uint32_t v_max = 10 * v_min;
/* Saturate at the cap instead of growing without bound. */
200 	if (state->polling_msecs >= v_max) {
201 		state->polling_msecs = v_max;
205 	state->polling_msecs += v_min;
/*
 * Async entry point: try to take @locks immediately; if a conflict is
 * hit, park the request on fsp->blocked_smb1_lock_reqs and watch
 * locking.tdb (or poll, for posix locks) until the blocker goes away
 * or the timeout expires.
 *
 * Ownership: *smbreq is talloc_move()d into the request state; the
 * caller's pointer is invalidated (see the parameter comment).
 *
 * NOTE(review): many lines are elided in this view; comments describe
 * only the visible code.
 */
208 struct tevent_req *smbd_smb1_do_locks_send(
210 	struct tevent_context *ev,
211 	struct smb_request **smbreq, /* talloc_move()d into our state */
212 	struct files_struct *fsp,
213 	uint32_t lock_timeout,
215 	enum brl_flavour lock_flav,
217 	struct smbd_lock_element *locks)
219 	struct tevent_req *req = NULL, *subreq = NULL;
220 	struct smbd_smb1_do_locks_state *state = NULL;
221 	struct share_mode_lock *lck = NULL;
222 	struct server_id blocking_pid = { 0 };
223 	uint64_t blocking_smblctx = 0;
224 	struct timeval endtime = { 0 };
225 	NTSTATUS status = NT_STATUS_OK;
229 	req = tevent_req_create(
230 		mem_ctx, &state, struct smbd_smb1_do_locks_state);
/* Take ownership of the smb_request; caller's pointer is NULLed. */
235 	state->smbreq = talloc_move(state, smbreq);
237 	state->timeout = lock_timeout;
238 	state->large_offset = large_offset;
239 	state->lock_flav = lock_flav;
240 	state->num_locks = num_locks;
241 	state->locks = locks;
/* Pick the deny status the client expects for this lock flavour. */
243 	if (lock_flav == POSIX_LOCK) {
245 		 * SMB1 posix locks always use
246 		 * NT_STATUS_FILE_LOCK_CONFLICT.
248 		state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;
250 		state->deny_status = NT_STATUS_LOCK_NOT_GRANTED;
253 	DBG_DEBUG("state=%p, state->smbreq=%p\n", state, state->smbreq);
/* Trivial case: nothing to lock, complete immediately. */
255 	if (num_locks == 0) {
256 		DBG_DEBUG("no locks\n");
257 		tevent_req_done(req);
258 		return tevent_req_post(req, ev);
261 	lck = get_existing_share_mode_lock(state, state->fsp->file_id);
262 	if (tevent_req_nomem(lck, req)) {
263 		DBG_DEBUG("Could not get share mode lock\n");
264 		return tevent_req_post(req, ev);
/* First attempt: may succeed outright, fail hard, or report a blocker. */
267 	status = smbd_do_locks_try(
275 	if (NT_STATUS_IS_OK(status)) {
276 		tevent_req_done(req);
/* Non-lock-denied errors are fatal, no retry. */
279 	if (!ERROR_WAS_LOCK_DENIED(status)) {
280 		tevent_req_nterror(req, status);
284 	smbd_smb1_do_locks_setup_timeout(state, &locks[state->blocker]);
285 	DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
290 	 * The client specified timeout expired
291 	 * avoid further retries.
293 	 * Otherwise keep waiting either waiting
294 	 * for changes in locking.tdb or the polling
295 	 * mode timers waiting for posix locks.
297 	 * If the endtime is not elapsed yet,
298 	 * it means we'll retry after a timeout.
299 	 * In that case we'll have to return
300 	 * NT_STATUS_FILE_LOCK_CONFLICT
301 	 * instead of NT_STATUS_LOCK_NOT_GRANTED.
303 	expired = timeval_expired(&state->endtime);
305 		status = state->deny_status;
306 		tevent_req_nterror(req, status);
/* From now on a later timeout reports FILE_LOCK_CONFLICT. */
309 	state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;
311 	endtime = state->endtime;
/*
 * blocking_smblctx == UINT64_MAX appears to mark a posix-lock
 * blocker that locking.tdb cannot signal — fall back to polling.
 */
313 	if (blocking_smblctx == UINT64_MAX) {
316 		smbd_smb1_do_locks_update_polling_msecs(state);
318 		DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
319 			  state->polling_msecs);
/* Wake at whichever comes first: poll tick or overall deadline. */
321 		tmp = timeval_current_ofs_msec(state->polling_msecs);
322 		endtime = timeval_min(&endtime, &tmp);
/* Watch the locking.tdb record so a lock release wakes us up. */
325 	subreq = dbwrap_watched_watch_send(
326 		state, state->ev, lck->data->record, blocking_pid);
327 	if (tevent_req_nomem(subreq, req)) {
331 	tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);
333 	ok = tevent_req_set_endtime(subreq, state->ev, endtime);
/* Register in fsp's blocked list; cleanup fn removes us again. */
339 	ok = smbd_smb1_fsp_add_blocked_lock_req(fsp, req);
344 	tevent_req_set_cleanup_fn(req, smbd_smb1_blocked_locks_cleanup);
348 	return tevent_req_post(req, ev);
/*
 * tevent cleanup hook: remove @req from fsp->blocked_smb1_lock_reqs,
 * preserving the order of the remaining entries (the protocol requires
 * ordered retries, see raw.lock.multilock2).
 *
 * NOTE(review): lines are elided in this view; comments describe only
 * the visible code.
 */
351 static void smbd_smb1_blocked_locks_cleanup(
352 	struct tevent_req *req, enum tevent_req_state req_state)
354 	struct smbd_smb1_do_locks_state *state = tevent_req_data(
355 		req, struct smbd_smb1_do_locks_state);
356 	struct files_struct *fsp = state->fsp;
357 	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
358 	size_t num_blocked = talloc_array_length(blocked);
361 	DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
/* Second invocation after tevent_req_received(): nothing left to do. */
366 	if (req_state == TEVENT_REQ_RECEIVED) {
367 		DBG_DEBUG("already received\n");
/* Find our slot in the blocked array; it must be present. */
371 	for (i=0; i<num_blocked; i++) {
372 		if (blocked[i] == req) {
376 	SMB_ASSERT(i<num_blocked);
378 	num_after = num_blocked - (i+1);
382 	 * The locks need to be kept in order, see
383 	 * raw.lock.multilock2
387 			sizeof(*blocked) * num_after);
/* Shrink the array by the one removed entry. */
389 	fsp->blocked_smb1_lock_reqs = talloc_realloc(
390 		fsp, blocked, struct tevent_req *, num_blocked-1);
/*
 * Retry handler: re-attempt the locks for the OLDEST blocked request
 * on this fsp (blocked[0]) — retries must happen in arrival order —
 * and either finish the request or re-arm the locking.tdb watch /
 * polling timer. Mirrors the first-attempt logic in
 * smbd_smb1_do_locks_send().
 *
 * NOTE(review): lines are elided in this view; comments describe only
 * the visible code.
 */
393 static void smbd_smb1_do_locks_try(struct tevent_req *req)
395 	struct smbd_smb1_do_locks_state *state = tevent_req_data(
396 		req, struct smbd_smb1_do_locks_state);
397 	struct files_struct *fsp = state->fsp;
398 	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
/* Always retry the head of the queue, which may differ from @req. */
399 	struct tevent_req *retry_req = blocked[0];
400 	struct smbd_smb1_do_locks_state *retry_state = tevent_req_data(
401 		retry_req, struct smbd_smb1_do_locks_state);
402 	struct share_mode_lock *lck;
403 	struct timeval endtime = { 0 };
404 	struct server_id blocking_pid = { 0 };
405 	uint64_t blocking_smblctx = 0;
406 	struct tevent_req *subreq = NULL;
411 	lck = get_existing_share_mode_lock(state, fsp->file_id);
412 	if (tevent_req_nomem(lck, req)) {
413 		DBG_DEBUG("Could not get share mode lock\n");
/* Retry with the head request's lock set, not necessarily ours. */
417 	status = smbd_do_locks_try(
419 		retry_state->lock_flav,
420 		retry_state->num_locks,
425 	if (NT_STATUS_IS_OK(status)) {
428 	if (!ERROR_WAS_LOCK_DENIED(status)) {
433 	 * The client specified timeout expired
434 	 * avoid further retries.
436 	 * Otherwise keep waiting either waiting
437 	 * for changes in locking.tdb or the polling
438 	 * mode timers waiting for posix locks.
440 	 * If the endtime is not expired yet,
441 	 * it means we'll retry after a timeout.
442 	 * In that case we'll have to return
443 	 * NT_STATUS_FILE_LOCK_CONFLICT
444 	 * instead of NT_STATUS_LOCK_NOT_GRANTED.
446 	expired = timeval_expired(&state->endtime);
448 		status = state->deny_status;
/* Subsequent timeouts report FILE_LOCK_CONFLICT, as in _send(). */
451 	state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;
453 	endtime = state->endtime;
/* UINT64_MAX smblctx: posix-lock blocker, fall back to polling. */
455 	if (blocking_smblctx == UINT64_MAX) {
458 		smbd_smb1_do_locks_update_polling_msecs(state);
460 		DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
461 			  state->polling_msecs);
463 		tmp = timeval_current_ofs_msec(state->polling_msecs);
464 		endtime = timeval_min(&endtime, &tmp);
/* Re-arm the watch on the locking.tdb record. */
467 	subreq = dbwrap_watched_watch_send(
468 		state, state->ev, lck->data->record, blocking_pid);
469 	if (tevent_req_nomem(subreq, req)) {
473 	tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);
475 	ok = tevent_req_set_endtime(subreq, state->ev, endtime);
477 		status = NT_STATUS_NO_MEMORY;
/* Common completion path for this retry attempt. */
483 	smbd_smb1_brl_finish_by_req(req, status);
/*
 * Callback fired when the watched locking.tdb record changed or the
 * watch timed out. Re-impersonates the requesting user and falls
 * through to another lock attempt; watch errors are deliberately
 * ignored because a retry handles every case.
 */
486 static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
488 	struct tevent_req *req = tevent_req_callback_data(
489 		subreq, struct tevent_req);
490 	struct smbd_smb1_do_locks_state *state = tevent_req_data(
491 		req, struct smbd_smb1_do_locks_state);
496 	 * Make sure we run as the user again
498 	ok = change_to_user_by_fsp(state->fsp);
500 		tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
504 	status = dbwrap_watched_watch_recv(subreq, NULL, NULL);
507 	DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
511 	 * We ignore any errors here, it's most likely
512 	 * we just get NT_STATUS_OK or NT_STATUS_IO_TIMEOUT.
514 	 * In any case we can just give it a retry.
517 	smbd_smb1_do_locks_try(req);
/*
 * Receive side of smbd_smb1_do_locks_send(). On failure, remember the
 * blocking lock's offset on the fsp (lock_failure_seen/_offset) so the
 * next attempt on the same offset is delayed by
 * smbd_smb1_do_locks_setup_timeout(). Always calls
 * tevent_req_received() to release the request state.
 */
520 NTSTATUS smbd_smb1_do_locks_recv(struct tevent_req *req)
522 	struct smbd_smb1_do_locks_state *state = tevent_req_data(
523 		req, struct smbd_smb1_do_locks_state);
524 	NTSTATUS status = NT_STATUS_OK;
527 	err = tevent_req_is_nterror(req, &status);
529 	DBG_DEBUG("err=%d, status=%s\n", (int)err, nt_errstr(status));
531 	if (tevent_req_is_nterror(req, &status)) {
532 		struct files_struct *fsp = state->fsp;
533 		struct smbd_lock_element *blocker =
534 			&state->locks[state->blocker];
536 		DBG_DEBUG("Setting lock_failure_offset=%"PRIu64"\n",
/* Record the failed offset to throttle immediate re-attempts. */
539 		fsp->lock_failure_seen = true;
540 		fsp->lock_failure_offset = blocker->offset;
544 	tevent_req_received(req);
/*
 * Hand the embedded smb_request back to the caller (talloc_move()d
 * onto @mem_ctx), leaving state->smbreq NULL. Returns false (per the
 * visible NULL check) when the smbreq was already extracted.
 */
549 bool smbd_smb1_do_locks_extract_smbreq(
550 	struct tevent_req *req,
552 	struct smb_request **psmbreq)
554 	struct smbd_smb1_do_locks_state *state = tevent_req_data(
555 		req, struct smbd_smb1_do_locks_state);
557 	DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
/* Already extracted by an earlier call. */
562 	if (state->smbreq == NULL) {
565 	*psmbreq = talloc_move(mem_ctx, &state->smbreq);
/*
 * Complete a blocked-lock request with @status: success maps to
 * tevent_req_done(), anything else to tevent_req_nterror().
 */
569 void smbd_smb1_brl_finish_by_req(struct tevent_req *req, NTSTATUS status)
571 	DBG_DEBUG("req=%p, status=%s\n", req, nt_errstr(status));
573 	if (NT_STATUS_IS_OK(status)) {
574 		tevent_req_done(req);
576 		tevent_req_nterror(req, status);
/*
 * Find a blocked request on @fsp that contains a lock element exactly
 * matching @lock (same smblctx, offset and count, with matching
 * large_offset flag and lock flavour) and finish it with
 * @finish_status. Used e.g. to cancel a pending blocking lock.
 *
 * NOTE(review): the return path is elided in this view; presumably
 * true means "a matching request was finished" — confirm against the
 * full file.
 */
580 bool smbd_smb1_brl_finish_by_lock(
581 	struct files_struct *fsp,
583 	enum brl_flavour lock_flav,
584 	struct smbd_lock_element lock,
585 	NTSTATUS finish_status)
587 	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
588 	size_t num_blocked = talloc_array_length(blocked);
591 	DBG_DEBUG("num_blocked=%zu\n", num_blocked);
593 	for (i=0; i<num_blocked; i++) {
594 		struct tevent_req *req = blocked[i];
595 		struct smbd_smb1_do_locks_state *state = tevent_req_data(
596 			req, struct smbd_smb1_do_locks_state);
599 		DBG_DEBUG("i=%zu, req=%p\n", i, req);
/* Flavour and large-offset mode must match before comparing ranges. */
601 		if ((state->large_offset != large_offset) ||
602 		    (state->lock_flav != lock_flav)) {
603 		for (j=0; j<state->num_locks; j++) {
606 		for (j=0; j<state->num_locks; j++) {
/*
 * files_forall() callback: scan one fsp's blocked-lock requests for
 * one whose smb_request carries the mid passed via @private_data, and
 * fail it with NT_STATUS_FILE_LOCK_CONFLICT (lock-cancel semantics).
 * Returning the fsp (elided here) would stop the walk.
 */
621 static struct files_struct *smbd_smb1_brl_finish_by_mid_fn(
622 	struct files_struct *fsp, void *private_data)
624 	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
625 	size_t num_blocked = talloc_array_length(blocked);
626 	uint64_t mid = *((uint64_t *)private_data);
629 	DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp, num_blocked);
631 	for (i=0; i<num_blocked; i++) {
632 		struct tevent_req *req = blocked[i];
633 		struct smbd_smb1_do_locks_state *state = tevent_req_data(
634 			req, struct smbd_smb1_do_locks_state);
635 		struct smb_request *smbreq = state->smbreq;
637 		if (smbreq->mid == mid) {
638 			tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
647 * This walks the list of fsps, we store the blocked reqs attached to
648 * them. It can be expensive, but this is legacy SMB1 and trying to
649  * remember looking at traces, I don't recall many of those calls.
/*
 * Cancel the blocked lock request identified by @mid, if any, by
 * walking all fsps on the connection. Returns true if a matching
 * request was found (and failed by the _fn callback above).
 */
652 bool smbd_smb1_brl_finish_by_mid(
653 	struct smbd_server_connection *sconn, uint64_t mid)
655 	struct files_struct *found = files_forall(
656 		sconn, smbd_smb1_brl_finish_by_mid_fn, &mid);
657 	return (found != NULL);