2 Unix SMB/CIFS implementation.
3 Blocking Locking functions
4 Copyright (C) Jeremy Allison 1998-2003
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "smbd/smbd.h"
22 #include "smbd/globals.h"
24 #include "lib/util/tevent_ntstatus.h"
25 #include "lib/dbwrap/dbwrap_watch.h"
26 #include "librpc/gen_ndr/ndr_open_files.h"
29 #define DBGC_CLASS DBGC_LOCKING
31 /****************************************************************************
32  We need a version of timeval_min that treats zero timval as infinite.
33 ****************************************************************************/
/*
 * Return the earlier of the two timeouts, where a zero timeval means
 * "no timeout at all" (wait forever) rather than "expire immediately".
 * NOTE(review): lines are missing from this view; presumably each
 * zero-check returns the other operand — confirm against full source.
 */
35 struct timeval timeval_brl_min(const struct timeval *tv1,
36 const struct timeval *tv2)
38 if (timeval_is_zero(tv1)) {
/* tv1 is "infinite": the other value wins. */
41 if (timeval_is_zero(tv2)) {
/* tv2 is "infinite": the other value wins. */
/* Neither is infinite: plain minimum applies. */
44 return timeval_min(tv1, tv2);
/*
 * Try to grab all requested byte-range locks in one pass.
 *
 * On failure, the blocker's details are reported through the out
 * parameters (blocker_idx, blocking_pid, blocking_smblctx) so the
 * caller can wait on the conflicting lock holder, and all locks that
 * were already acquired in this pass are rolled back (all-or-nothing).
 * Returns NT_STATUS_OK when every lock in locks[] was granted.
 */
47 NTSTATUS smbd_do_locks_try(
48 struct files_struct *fsp,
49 enum brl_flavour lock_flav,
51 struct smbd_lock_element *locks,
52 uint16_t *blocker_idx,
53 struct server_id *blocking_pid,
54 uint64_t *blocking_smblctx)
56 NTSTATUS status = NT_STATUS_OK;
/* Acquire the locks in order; stop at the first conflict. */
59 for (i=0; i<num_locks; i++) {
60 struct smbd_lock_element *e = &locks[i];
71 if (!NT_STATUS_IS_OK(status)) {
76 if (NT_STATUS_IS_OK(status)) {
83 * Undo the locks we successfully got
/*
 * i is unsigned 16-bit: counting down from i-1 until it wraps
 * to UINT16_MAX walks indices i-1 .. 0 without the classic
 * "unsigned >= 0 is always true" bug.
 */
85 for (i = i-1; i != UINT16_MAX; i--) {
86 struct smbd_lock_element *e = &locks[i];
/*
 * Append a pending blocking-lock tevent_req to the fsp's array of
 * blocked SMB1 lock requests. Returns false on allocation failure
 * (presumably — the failure path is outside this view).
 * Order matters: requests are retried FIFO (see raw.lock.multilock2
 * note in the cleanup function).
 */
97 static bool smbd_smb1_fsp_add_blocked_lock_req(
98 struct files_struct *fsp, struct tevent_req *req)
100 size_t num_reqs = talloc_array_length(fsp->blocked_smb1_lock_reqs);
101 struct tevent_req **tmp = NULL;
/* Grow the talloc array by one slot; tmp guards against realloc failure. */
103 tmp = talloc_realloc(
105 fsp->blocked_smb1_lock_reqs,
111 fsp->blocked_smb1_lock_reqs = tmp;
112 fsp->blocked_smb1_lock_reqs[num_reqs] = req;
/*
 * Per-request state for an async SMB1 blocking-lock operation
 * (smbd_smb1_do_locks_send/recv). Owns the smb_request via talloc_move.
 */
116 struct smbd_smb1_do_locks_state {
117 struct tevent_context *ev;
118 struct messaging_context *msg_ctx;
/* The originating SMB1 request; moved into this state on _send. */
119 struct smb_request *smbreq;
120 struct files_struct *fsp;
/* Absolute deadline for the whole blocking-lock wait. */
121 struct timeval endtime;
122 bool large_offset; /* required for correct cancel */
123 enum brl_flavour lock_flav;
/* The array of locks being requested (not owned — caller-provided). */
125 struct smbd_lock_element *locks;
129 static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
130 static void smbd_smb1_blocked_locks_cleanup(
131 struct tevent_req *req, enum tevent_req_state req_state);
/*
 * Start an async SMB1 blocking-lock request.
 *
 * Tries to take all locks immediately; if a conflict is found, watches
 * the share-mode record of the blocker (via dbwrap_watched_watch_send)
 * and retries when it changes, up to the computed timeout. The smbreq
 * is talloc_move()d into our state, so *smbreq is NULL on return.
 * Completion is reported through smbd_smb1_do_locks_recv.
 */
133 struct tevent_req *smbd_smb1_do_locks_send(
135 struct tevent_context *ev,
136 struct messaging_context *msg_ctx,
137 struct smb_request **smbreq, /* talloc_move()d into our state */
138 struct files_struct *fsp,
141 enum brl_flavour lock_flav,
143 struct smbd_lock_element *locks)
145 struct tevent_req *req = NULL, *subreq = NULL;
146 struct smbd_smb1_do_locks_state *state = NULL;
147 struct share_mode_lock *lck = NULL;
148 struct server_id blocking_pid = { 0 };
149 uint64_t blocking_smblctx = 0;
150 struct timeval endtime;
151 NTSTATUS status = NT_STATUS_OK;
154 req = tevent_req_create(
155 mem_ctx, &state, struct smbd_smb1_do_locks_state);
160 state->msg_ctx = msg_ctx;
/* Take ownership of the SMB request; caller's pointer becomes NULL. */
161 state->smbreq = talloc_move(state, smbreq);
163 state->large_offset = large_offset;
164 state->lock_flav = lock_flav;
165 state->num_locks = num_locks;
166 state->locks = locks;
168 DBG_DEBUG("state=%p, state->smbreq=%p\n", state, state->smbreq);
/* Nothing to lock: succeed immediately. */
170 if (num_locks == 0) {
171 DBG_DEBUG("no locks\n");
172 tevent_req_done(req);
173 return tevent_req_post(req, ev);
/* 0 and UINT32_MAX are special timeout values (no-wait / infinite). */
176 if ((timeout != 0) && (timeout != UINT32_MAX)) {
178 * Windows internal resolution for blocking locks
179 * seems to be about 200ms... Don't wait for less than
182 timeout = MAX(timeout, lp_lock_spin_time());
185 lck = get_existing_share_mode_lock(state, state->fsp->file_id);
186 if (tevent_req_nomem(lck, req)) {
187 DBG_DEBUG("Could not get share mode lock\n");
188 return tevent_req_post(req, ev);
/* First, optimistic attempt to take everything right now. */
191 status = smbd_do_locks_try(
199 if (NT_STATUS_IS_OK(status)) {
200 tevent_req_done(req);
/* Only lock-conflict errors are retryable; anything else is fatal. */
203 if (!ERROR_WAS_LOCK_DENIED(status)) {
204 tevent_req_nterror(req, status);
209 struct smbd_lock_element *blocker = &locks[state->blocker];
/*
 * Offsets in the 0xEF000000.. range (with bit 63 clear) are used
 * by ancient clients as pseudo-locks; don't block long on them.
 */
211 if ((blocker->offset >= 0xEF000000) &&
212 ((blocker->offset >> 63) == 0)) {
214 * This must be an optimization of an ancient
217 timeout = lp_lock_spin_time();
/* Repeated conflict on the same offset: back off to the spin time. */
220 if ((fsp->lock_failure_seen) &&
221 (blocker->offset == fsp->lock_failure_offset)) {
223 * Delay repeated lock attempts on the same
224 * lock. Maybe a more advanced version of the
227 DBG_DEBUG("Delaying lock request due to previous "
229 timeout = lp_lock_spin_time();
233 DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
238 tevent_req_nterror(req, status);
/* Wait for the blocker's share-mode record to change. */
242 subreq = dbwrap_watched_watch_send(
243 state, state->ev, lck->data->record, blocking_pid);
244 if (tevent_req_nomem(subreq, req)) {
248 tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);
250 state->endtime = timeval_current_ofs_msec(timeout);
251 endtime = state->endtime;
/*
 * UINT64_MAX smblctx means the blocker is a POSIX (non-SMB) lock;
 * no record change will wake us, so poll once per second instead.
 */
253 if (blocking_smblctx == UINT64_MAX) {
256 DBG_DEBUG("Blocked on a posix lock. Retry in one second\n");
258 tmp = timeval_current_ofs(1, 0);
259 endtime = timeval_min(&endtime, &tmp);
262 ok = tevent_req_set_endtime(subreq, state->ev, endtime);
/* Queue this request on the fsp so cancel/unlock can find it. */
268 ok = smbd_smb1_fsp_add_blocked_lock_req(fsp, req);
/* Cleanup fn removes us from the fsp's blocked list on completion. */
273 tevent_req_set_cleanup_fn(req, smbd_smb1_blocked_locks_cleanup);
277 return tevent_req_post(req, ev);
/*
 * tevent cleanup function: remove a finished/cancelled blocking-lock
 * request from its fsp's blocked_smb1_lock_reqs array, preserving the
 * order of the remaining entries.
 */
280 static void smbd_smb1_blocked_locks_cleanup(
281 struct tevent_req *req, enum tevent_req_state req_state)
283 struct smbd_smb1_do_locks_state *state = tevent_req_data(
284 req, struct smbd_smb1_do_locks_state);
285 struct files_struct *fsp = state->fsp;
286 struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
287 size_t num_blocked = talloc_array_length(blocked);
290 DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
/* TEVENT_REQ_RECEIVED means a prior cleanup already removed us. */
295 if (req_state == TEVENT_REQ_RECEIVED) {
296 DBG_DEBUG("already received\n");
/* Locate this req in the fsp's blocked array. */
300 for (i=0; i<num_blocked; i++) {
301 if (blocked[i] == req) {
/* It must be present: _send always added it before setting us up. */
305 SMB_ASSERT(i<num_blocked);
307 num_after = num_blocked - (i+1);
311 * The locks need to be kept in order, see
312 * raw.lock.multilock2
/* Shift the tail down one slot (memmove-style, overlapping copy). */
316 sizeof(*blocked) * num_after);
/* Shrink the array by one; realloc-to-smaller shouldn't fail. */
318 fsp->blocked_smb1_lock_reqs = talloc_realloc(
319 fsp, blocked, struct tevent_req *, num_blocked-1);
/*
 * Callback fired when the watched share-mode record changes (or the
 * watch times out). Retries the lock attempt for the FIRST queued
 * request on the fsp (FIFO fairness), re-arming the watch if the
 * locks are still contended.
 */
322 static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
324 struct tevent_req *req = tevent_req_callback_data(
325 subreq, struct tevent_req);
326 struct smbd_smb1_do_locks_state *state = tevent_req_data(
327 req, struct smbd_smb1_do_locks_state);
328 struct files_struct *fsp = state->fsp;
329 struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
/*
 * Retry the head of the queue, which may not be "req" itself:
 * lock order must be preserved across waiters.
 */
330 struct tevent_req *retry_req = blocked[0];
331 struct smbd_smb1_do_locks_state *retry_state = tevent_req_data(
332 retry_req, struct smbd_smb1_do_locks_state);
333 struct share_mode_lock *lck;
334 struct timeval endtime;
335 struct server_id blocking_pid = { 0 };
336 uint64_t blocking_smblctx = 0;
340 status = dbwrap_watched_watch_recv(subreq, NULL, NULL);
343 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
/* IO_TIMEOUT: either the real deadline passed (-> conflict error)
 * or this was just the 1-second POSIX-lock poll (-> retry). */
346 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
347 double elapsed = timeval_elapsed(&state->endtime);
349 smbd_smb1_brl_finish_by_req(
350 req, NT_STATUS_FILE_LOCK_CONFLICT);
354 * This is a posix lock retry. Just retry.
358 lck = get_existing_share_mode_lock(state, fsp->file_id);
359 if (tevent_req_nomem(lck, req)) {
360 DBG_DEBUG("Could not get share mode lock\n");
/* Try again with the head waiter's lock set. */
364 status = smbd_do_locks_try(
366 retry_state->lock_flav,
367 retry_state->num_locks,
372 if (NT_STATUS_IS_OK(status)) {
375 if (!ERROR_WAS_LOCK_DENIED(status)) {
/* Still blocked: re-arm the record watch on the new blocker. */
379 subreq = dbwrap_watched_watch_send(
380 state, state->ev, lck->data->record, blocking_pid);
381 if (tevent_req_nomem(subreq, req)) {
385 tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);
387 endtime = state->endtime;
/* POSIX blocker again: fall back to a 1-second poll (see _send). */
389 if (blocking_smblctx == UINT64_MAX) {
392 DBG_DEBUG("Blocked on a posix lock. Retry in one second\n");
394 tmp = timeval_current_ofs(1, 0);
395 endtime = timeval_min(&endtime, &tmp);
398 ok = tevent_req_set_endtime(subreq, state->ev, endtime);
400 status = NT_STATUS_NO_MEMORY;
406 smbd_smb1_brl_finish_by_req(req, status);
/*
 * Collect the result of smbd_smb1_do_locks_send. On failure, remember
 * the blocking offset on the fsp so that an immediate re-attempt at
 * the same offset is deliberately delayed (see the spin-time backoff
 * in _send). Always calls tevent_req_received().
 */
409 NTSTATUS smbd_smb1_do_locks_recv(struct tevent_req *req)
411 struct smbd_smb1_do_locks_state *state = tevent_req_data(
412 req, struct smbd_smb1_do_locks_state);
413 NTSTATUS status = NT_STATUS_OK;
416 err = tevent_req_is_nterror(req, &status);
418 DBG_DEBUG("err=%d, status=%s\n", (int)err, nt_errstr(status));
420 if (tevent_req_is_nterror(req, &status)) {
421 struct files_struct *fsp = state->fsp;
422 struct smbd_lock_element *blocker =
423 &state->locks[state->blocker];
425 DBG_DEBUG("Setting lock_failure_offset=%"PRIu64"\n",
/* Record the failure so the next try on this offset backs off. */
428 fsp->lock_failure_seen = true;
429 fsp->lock_failure_offset = blocker->offset;
433 tevent_req_received(req);
/*
 * Hand the embedded smb_request back to the caller (ownership moves to
 * mem_ctx). Returns false if the smbreq was already extracted
 * (presumably — the return statements are outside this view).
 */
438 bool smbd_smb1_do_locks_extract_smbreq(
439 struct tevent_req *req,
441 struct smb_request **psmbreq)
443 struct smbd_smb1_do_locks_state *state = tevent_req_data(
444 req, struct smbd_smb1_do_locks_state);
446 DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
/* Already extracted once: nothing left to hand out. */
451 if (state->smbreq == NULL) {
454 *psmbreq = talloc_move(mem_ctx, &state->smbreq);
/*
 * Complete a blocking-lock request with the given status: _done on
 * NT_STATUS_OK, _nterror otherwise. Small helper shared by the retry
 * and cancel paths.
 */
458 void smbd_smb1_brl_finish_by_req(struct tevent_req *req, NTSTATUS status)
460 DBG_DEBUG("req=%p, status=%s\n", req, nt_errstr(status));
462 if (NT_STATUS_IS_OK(status)) {
463 tevent_req_done(req);
465 tevent_req_nterror(req, status);
/*
 * Find and complete the pending blocking-lock request that contains a
 * lock matching (smblctx, offset, count) with the same large_offset
 * and lock flavour — used by LOCKING_ANDX_CANCEL_LOCK-style cancel.
 * Returns whether a matching request was found (presumably — the
 * return statements are outside this view).
 */
469 bool smbd_smb1_brl_finish_by_lock(
470 struct files_struct *fsp,
472 enum brl_flavour lock_flav,
473 struct smbd_lock_element lock,
474 NTSTATUS finish_status)
476 struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
477 size_t num_blocked = talloc_array_length(blocked);
480 DBG_DEBUG("num_blocked=%zu\n", num_blocked);
482 for (i=0; i<num_blocked; i++) {
483 struct tevent_req *req = blocked[i];
484 struct smbd_smb1_do_locks_state *state = tevent_req_data(
485 req, struct smbd_smb1_do_locks_state);
488 DBG_DEBUG("i=%zu, req=%p\n", i, req);
/* Cancel must match the exact flavour/offset-width of the request. */
490 if ((state->large_offset != large_offset) ||
491 (state->lock_flav != lock_flav)) {
/* Scan this request's lock set for the exact (ctx,offset,count). */
495 for (j=0; j<state->num_locks; j++) {
496 struct smbd_lock_element *l = &state->locks[j];
498 if ((lock.smblctx == l->smblctx) &&
499 (lock.offset == l->offset) &&
500 (lock.count == l->count)) {
501 smbd_smb1_brl_finish_by_req(
/*
 * files_forall() callback: on each fsp, look for a blocked lock
 * request whose SMB1 mid matches *private_data and fail it with
 * FILE_LOCK_CONFLICT (the NT_CANCEL semantics). Returning non-NULL
 * presumably stops the walk once a match is found.
 */
510 static struct files_struct *smbd_smb1_brl_finish_by_mid_fn(
511 struct files_struct *fsp, void *private_data)
513 struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
514 size_t num_blocked = talloc_array_length(blocked);
515 uint64_t mid = *((uint64_t *)private_data);
518 DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp, num_blocked);
520 for (i=0; i<num_blocked; i++) {
521 struct tevent_req *req = blocked[i];
522 struct smbd_smb1_do_locks_state *state = tevent_req_data(
523 req, struct smbd_smb1_do_locks_state);
524 struct smb_request *smbreq = state->smbreq;
/* mid identifies the original SMB1 request being cancelled. */
526 if (smbreq->mid == mid) {
527 tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
536 * This walks the list of fsps; we store the blocked reqs attached to
537 * them. It can be expensive, but this is legacy SMB1 and, looking at
538 * traces, I don't recall many of those calls.
/*
 * Cancel the pending blocking-lock request with the given SMB1 mid by
 * walking every open fsp on this connection. Returns true iff a
 * matching request was found and failed.
 */
541 bool smbd_smb1_brl_finish_by_mid(
542 struct smbd_server_connection *sconn, uint64_t mid)
544 struct files_struct *found = files_forall(
545 sconn, smbd_smb1_brl_finish_by_mid_fn, &mid);
546 return (found != NULL);