/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */
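/*
 * Two lock "flavours" coexist in this module: WINDOWS_LOCK entries follow
 * strict NT semantics (locks on the same context may stack, and ranges are
 * never coalesced), while POSIX_LOCK entries follow POSIX semantics (locks
 * on the same context never conflict, and overlapping or adjacent ranges
 * are split and merged by brlock_posix_split_merge() below).
 */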
#include "includes.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"
#include "util_tdb.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;
struct byte_range_lock {
	struct files_struct *fsp;
	unsigned int num_locks;
	bool modified;
	uint32_t num_read_oplocks;
	struct lock_struct *lock_data;
	struct db_record *record;
};
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	struct server_id_buf tmp;

	DBG_DEBUG("[%u]: smblctx = %"PRIu64", tid = %"PRIu32", pid = %s, "
		  "start = %"PRIu64", size = %"PRIu64", fnum = %"PRIu64", "
		  "%s %s\n",
		  i,
		  pls->context.smblctx,
		  pls->context.tid,
		  server_id_str_buf(pls->context.pid, &tmp),
		  pls->start,
		  pls->size,
		  pls->fnum,
		  lock_type_name(pls->lock_type),
		  lock_flav_name(pls->lock_flav));
}
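/*
 * For reference, a level-10 line from the above looks roughly like (exact
 * pid formatting depends on server_id_str_buf()):
 *
 * [0]: smblctx = 52, tid = 1, pid = 12345, start = 0, size = 100,
 * fnum = 4711, READ WINDOWS_LOCK
 */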
unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
	return brl->num_locks;
}

struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
	return brl->fsp;
}

uint32_t brl_num_read_oplocks(const struct byte_range_lock *brl)
{
	return brl->num_read_oplocks;
}

void brl_set_num_read_oplocks(struct byte_range_lock *brl,
			      uint32_t num_read_oplocks)
{
	DEBUG(10, ("Setting num_read_oplocks to %"PRIu32"\n",
		   num_read_oplocks));
	SMB_ASSERT(brl->record != NULL); /* otherwise we're readonly */
	brl->num_read_oplocks = num_read_oplocks;
	brl->modified = true;
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (serverid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
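/*
 * Lock ranges behave as half-open intervals [start, start+size), so a
 * worked example for the check above: [0, 10) and a lock starting at 10 of
 * size 5 do not overlap (10 >= 0+10), while one starting at 9 does. The
 * leading equal-start/equal-size test catches ranges whose start+size
 * wraps past the end of 64-bit file space, which would otherwise compare
 * as non-overlapping.
 */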
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	 * context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
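/*
 * Example of the stacking rule above: a context holding a WRITE lock on
 * [100, 10) may add a READ lock over the same range on the same fnum
 * without conflict. The reverse is not granted here: an existing READ
 * lock with an incoming overlapping WRITE falls through to brl_overlap()
 * and conflicts even for the same context (see the LOCKTEST7 note in
 * brl_conflict_other() below).
 */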
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 already know matches.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lock,
			       const struct lock_struct *rw_probe)
{
	if (IS_PENDING_LOCK(lock->lock_type) ||
	    IS_PENDING_LOCK(rw_probe->lock_type)) {
		return False;
	}

	if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
		/* Read locks never conflict. */
		return False;
	}

	if (lock->lock_flav == POSIX_LOCK &&
	    rw_probe->lock_flav == POSIX_LOCK) {
		/*
		 * POSIX flavour locks never conflict here - this is only called
		 * in the read/write path.
		 */
		return False;
	}

	if (!brl_overlap(lock, rw_probe)) {
		/*
		 * I/O can only conflict when overlapping a lock, thus let it
		 * pass
		 */
		return false;
	}

	if (!brl_same_context(&lock->context, &rw_probe->context)) {
		/*
		 * Different process, conflict
		 */
		return true;
	}

	if (lock->fnum != rw_probe->fnum) {
		/*
		 * Different file handle, conflict
		 */
		return true;
	}

	if ((lock->lock_type == READ_LOCK) &&
	    (rw_probe->lock_type == WRITE_LOCK)) {
		/*
		 * Incoming WRITE locks conflict with existing READ locks even
		 * if the context is the same. JRA. See LOCKTEST7 in
		 * smbtorture.
		 */
		return true;
	}

	/*
	 * I/O request compatible with existing lock, let it pass without
	 * conflict
	 */

	return false;
}
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock,
				const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) &&
	    (lock->start + lock->size > pend_lock->start)) {
		return True;
	}
	if ((lock->start >= pend_lock->start) &&
	    (lock->start < pend_lock->start + pend_lock->size)) {
		return True;
	}
	return False;
}
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(files_struct *fsp,
				const struct lock_struct *lock,
				bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
	    lock->context.tid == fsp->last_lock_failure.context.tid &&
	    lock->fnum == fsp->last_lock_failure.fnum &&
	    lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;
	char *db_path;

	if (brlock_db) {
		return;
	}

	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

	if (!lp_clustering()) {
		/*
		 * We can't use the SEQNUM trick to cache brlock
		 * entries in the clustering case because ctdb seqnum
		 * propagation has a delay.
		 */
		tdb_flags |= TDB_SEQNUM;
	}

	db_path = lock_path(talloc_tos(), "brlock.tdb");
	if (db_path == NULL) {
		DEBUG(0, ("out of memory!\n"));
		return;
	}

	brlock_db = db_open(NULL, db_path,
			    SMB_OPEN_DATABASE_TDB_HASH_SIZE, tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			 db_path));
		TALLOC_FREE(db_path);
		return;
	}
	TALLOC_FREE(db_path);
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
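/*
 * Note: this comparator is only referenced from the zero-zero sort path in
 * brl_lock() below, and the subtractions truncate the 64-bit br_off values
 * to int, so it yields a rough ordering by start (then size) rather than a
 * total order over the full 64-bit range.
 */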
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	if ((plock->start + plock->size - 1 < plock->start) &&
	    plock->size != 0) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
		    locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					     struct lock_struct *ex,		/* existing lock. */
					     struct lock_struct *plock)		/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        | plock                     | -> replace with plock.
        +---------------------------+
OR....
                +---------------+
                |       ex      |
                +---------------+
        +---------------------------+
        | plock                     | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        | plock         |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                | plock         |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types.
        +-------+---------------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        | plock         |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        | plock         |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
        +---------------+
        |  plock        |
        +---------------+
OR
        +-------------+
        |  ex         |
        +-------------+
        +---------------+
        | plock         |
        +---------------+

BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types.
        +-------+---------------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                | plock   |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    | plock   | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        | plock                     | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
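/*
 * Worked example for the "complete overlap" case: ex = [100, 50) held as a
 * WRITE lock, plock = [120, 10) arriving as a READ lock. The lock types
 * differ, so the existing range is split around plock: lck_arr[0] becomes
 * [100, 20) and lck_arr[1] becomes [130, 20), and 2 is returned. With
 * equal lock types the same input merges instead: plock absorbs ex,
 * becoming [100, 50), and 0 is returned.
 */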
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
		    brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				TALLOC_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				TALLOC_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evalutation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
			    brl_pending_overlap(plock, pend_lock)) {
				struct server_id_buf tmp;

				DEBUG(10, ("brl_lock_posix: sending unlock "
					   "message to pid %s\n",
					   server_id_str_buf(pend_lock->context.pid,
							     &tmp)));

				messaging_send(msg_ctx, pend_lock->context.pid,
					       MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows_fn(handle, br_lck, plock,
						blocking_lock);
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint64_t *psmblctx)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock = (struct lock_struct) {
		.context.smblctx = smblctx,
		.context.pid = pid,
		.context.tid = br_lck->fsp->conn->cnum,
		.start = start,
		.size = size,
		.fnum = br_lck->fsp->fnum,
		.lock_type = lock_type,
		.lock_flav = lock_flav
	};

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
					       &lock, blocking_lock);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
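/*
 * Illustrative call only (the real callers live in the locking layer):
 * request a Windows-flavour write lock and collect the blocker's smblctx
 * on failure.
 *
 *	uint64_t blocker_smblctx = 0;
 *	NTSTATUS status = brl_lock(msg_ctx, br_lck, smblctx,
 *				   messaging_server_id(msg_ctx),
 *				   start, size, WRITE_LOCK, WINDOWS_LOCK,
 *				   false, &blocker_smblctx);
 */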
static void brl_delete_lock_struct(struct lock_struct *locks,
				   unsigned num_locks,
				   unsigned del_idx)
{
	if (del_idx >= num_locks) {
		return;
	}
	memmove(&locks[del_idx], &locks[del_idx+1],
		sizeof(*locks) * (num_locks - del_idx - 1));
}
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
				struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK;	/* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (IS_PENDING_LOCK(lock->lock_type)) {
			continue;
		}

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	brl_delete_lock_struct(locks, br_lck->num_locks, i);
	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			struct server_id_buf tmp;

			DEBUG(10, ("brl_unlock: sending unlock message to "
				   "pid %s\n",
				   server_id_str_buf(pend_lock->context.pid,
						     &tmp)));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
		    !brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				TALLOC_FREE(tp);
				return false;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
				       sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		TALLOC_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						 plock->start,
						 plock->size,
						 &plock->context,
						 tp,
						 count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		TALLOC_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			struct server_id_buf tmp;

			DEBUG(10, ("brl_unlock: sending unlock message to "
				   "pid %s\n",
				   server_id_str_buf(pend_lock->context.pid,
						     &tmp)));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}
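/*
 * Example of why one extra array slot suffices above: unlocking [120, 10)
 * out of a single held POSIX lock [100, 50) splits it into [100, 20) and
 * [130, 20) - a net gain of exactly one entry. Unlocks that merely trim
 * or swallow ranges only shrink the array.
 */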
bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck,
						  plock);
}
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
						  br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		  const struct lock_struct *rw_probe)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], rw_probe)) {
			if (br_lck->record == NULL) {
				/* readonly */
				return false;
			}

			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}

			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) &&
	   (rw_probe->lock_flav == WINDOWS_LOCK)) {
		/*
		 * Make copies -- is_posix_locked might modify the values
		 */

		br_off start = rw_probe->start;
		br_off size = rw_probe->size;
		enum brl_type lock_type = rw_probe->lock_type;

		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)start, (uintmax_t)size,
			   ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
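/*
 * This probe is used on the read/write code path (see brl_conflict_other()
 * above): rw_probe describes the I/O range being attempted, and a true
 * result means the I/O may proceed.
 */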
/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		       uint64_t *psmblctx,
		       struct server_id pid,
		       br_off *pstart,
		       br_off *psize,
		       enum brl_type *plock_type,
		       enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)*pstart,
			   (uintmax_t)*psize, ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     struct lock_struct *plock)
{
	VFS_FIND(brl_cancel_windows);
	return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock);
}
/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

bool brl_lock_cancel(struct byte_range_lock *br_lck,
		     uint64_t smblctx,
		     struct server_id pid,
		     br_off start,
		     br_off size,
		     enum brl_flavour lock_flav)
{
	bool ret;
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_flav = lock_flav;
	/* lock.lock_type doesn't matter */

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
						 &lock);
	} else {
		ret = brl_lock_cancel_default(br_lck, &lock);
	}

	return ret;
}
bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;

	SMB_ASSERT(plock);

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    IS_PENDING_LOCK(lock->lock_type) &&
		    lock->lock_flav == plock->lock_flav &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	brl_delete_lock_struct(locks, br_lck->num_locks, i);
	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}
/****************************************************************************
 Remove any locks associated with an open file.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint32_t tid = fsp->conn->cnum;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
		if (!locks_copy) {
			smb_panic("brl_close_fnum: talloc failed");
		}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		if (lock->context.tid == tid && serverid_equal(&lock->context.pid, &pid) &&
		    (lock->fnum == fnum)) {
			brl_unlock(msg_ctx,
				   br_lck,
				   lock->context.smblctx,
				   pid,
				   lock->start,
				   lock->size,
				   lock->lock_flav);
		}
	}
}
bool brl_mark_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	if (fsp->current_lock_count == 0) {
		return true;
	}

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle, we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != tid) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!serverid_equal(&lock->context.pid, &self)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != fnum) {
			TALLOC_FREE(br_lck);
			return false;
		}

		server_id_set_disconnected(&lock->context.pid);
		lock->context.tid = TID_FIELD_INVALID;
		lock->fnum = FNUM_FIELD_INVALID;
	}

	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	num_locks = value.dsize/sizeof(*locks);

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;
	NTSTATUS status;
	int count = 0;

	if (brlock_db == NULL) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	} else {
		return count;
	}
}
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	unsigned i;
	struct lock_struct *locks = br_lck->lock_data;

	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	i = 0;

	while (i < br_lck->num_locks) {
		if (locks[i].context.pid.pid == 0) {
			/*
			 * Autocleanup, the process conflicted and does not
			 * exist anymore.
			 */
			locks[i] = locks[br_lck->num_locks-1];
			br_lck->num_locks -= 1;
		} else {
			i += 1;
		}
	}

	if ((br_lck->num_locks == 0) && (br_lck->num_read_oplocks == 0)) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		size_t lock_len, data_len;
		TDB_DATA data;
		NTSTATUS status;

		lock_len = br_lck->num_locks * sizeof(struct lock_struct);
		data_len = lock_len + sizeof(br_lck->num_read_oplocks);

		data.dsize = data_len;
		data.dptr = talloc_array(talloc_tos(), uint8_t, data_len);
		SMB_ASSERT(data.dptr != NULL);

		memcpy(data.dptr, br_lck->lock_data, lock_len);
		memcpy(data.dptr + lock_len, &br_lck->num_read_oplocks,
		       sizeof(br_lck->num_read_oplocks));

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		TALLOC_FREE(data.dptr);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

 done:
	br_lck->modified = false;
	TALLOC_FREE(br_lck->record);
}
static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
{
	size_t data_len;

	if (data.dsize == 0) {
		return true;
	}
	if (data.dsize % sizeof(struct lock_struct) !=
	    sizeof(br_lck->num_read_oplocks)) {
		DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
		return false;
	}

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
	data_len = br_lck->num_locks * sizeof(struct lock_struct);

	br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
	if (br_lck->lock_data == NULL) {
		DEBUG(1, ("talloc_memdup failed\n"));
		return false;
	}
	memcpy(&br_lck->num_read_oplocks, data.dptr + data_len,
	       sizeof(br_lck->num_read_oplocks));
	return true;
}
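/*
 * On-disk record layout implied by the parser above and by
 * byte_range_lock_flush():
 *
 *	+-----------------------------------------+------------------+
 *	| num_locks * sizeof(struct lock_struct)  | uint32_t         |
 *	| (the lock_data array)                   | num_read_oplocks |
 *	+-----------------------------------------+------------------+
 *
 * hence the "dsize % sizeof(struct lock_struct) ==
 * sizeof(num_read_oplocks)" validity check.
 */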
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;

	key.dptr = (uint8_t *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	if (!brl_parse_data(br_lck, data)) {
		TALLOC_FREE(br_lck);
		return NULL;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_string_tos(&fsp->file_id)));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};

static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
					  void *private_data)
{
	struct brl_get_locks_readonly_state *state =
		(struct brl_get_locks_readonly_state *)private_data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_pooled_object(
		state->mem_ctx, struct byte_range_lock, 1, data.dsize);
	if (br_lck == NULL) {
		*state->br_lock = NULL;
		return;
	}
	*br_lck = (struct byte_range_lock) { 0 };
	if (!brl_parse_data(br_lck, data)) {
		*state->br_lock = NULL;
		return;
	}
	*state->br_lock = br_lck;
}
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct brl_get_locks_readonly_state state;
	NTSTATUS status;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	/*
	 * Parse the record fresh from the database
	 */

	state.mem_ctx = fsp;
	state.br_lock = &br_lock;

	status = dbwrap_parse_record(
		brlock_db,
		make_tdb_data((uint8_t *)&fsp->file_id,
			      sizeof(fsp->file_id)),
		brl_get_locks_readonly_parser, &state);

	if (NT_STATUS_EQUAL(status,NT_STATUS_NOT_FOUND)) {
		/*
		 * No locks on this file. Return an empty br_lock.
		 */
		br_lock = talloc(fsp, struct byte_range_lock);
		if (br_lock == NULL) {
			return NULL;
		}

		br_lock->num_read_oplocks = 0;
		br_lock->num_locks = 0;
		br_lock->lock_data = NULL;

	} else if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not parse byte range lock record: "
			  "%s\n", nt_errstr(status)));
		return NULL;
	}
	if (br_lock == NULL) {
		return NULL;
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	/*
	 * Cache the brlock struct, invalidated when the dbwrap_seqnum
	 * changes. See beginning of this routine.
	 */
	TALLOC_FREE(fsp->brlock_rec);
	fsp->brlock_rec = br_lock;
	fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

	return br_lock;
}
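/*
 * The seqnum cache above is what the TDB_SEQNUM flag in brl_init() exists
 * for; as noted there, the trick is not usable with clustering because
 * ctdb seqnum propagation is delayed.
 */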
struct brl_revalidate_state {
	ssize_t array_size;
	uint32_t num_pids;
	struct server_id *pids;
};

/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}
/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (const struct server_id *)p1;
	const struct server_id *i2 = (const struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}
/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

void brl_revalidate(struct messaging_context *msg_ctx,
		    void *private_data,
		    uint32_t msg_type,
		    struct server_id server_id,
		    DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32_t i;
	struct server_id last_pid;

	if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (serverid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

 done:
	TALLOC_FREE(state);
	return;
}
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DEBUG(5, ("brl_cleanup_disconnected: failed to fetch record "
			  "for file %s\n", file_id_string(frame, &fid)));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct*)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DEBUG(10, ("brl_cleanup_disconnected: no byte range locks for "
			   "file %s\n", file_id_string(frame, &fid)));
		ret = true;
		goto done;
	}

	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			struct server_id_buf tmp;
			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
				  "%s used by server %s, do not cleanup\n",
				  file_id_string(frame, &fid),
				  server_id_str_buf(ctx->pid, &tmp)));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
				  "%s expected smblctx %llu but found %llu"
				  ", do not cleanup\n",
				  file_id_string(frame, &fid),
				  (unsigned long long)open_persistent_id,
				  (unsigned long long)ctx->smblctx));
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("brl_cleanup_disconnected: failed to delete record "
			  "for file %s from %s, open %llu: %s\n",
			  file_id_string(frame, &fid), dbwrap_name(brlock_db),
			  (unsigned long long)open_persistent_id,
			  nt_errstr(status)));
		goto done;
	}

	DEBUG(10, ("brl_cleanup_disconnected: "
		   "file %s cleaned up %u entries from open %llu\n",
		   file_id_string(frame, &fid), num,
		   (unsigned long long)open_persistent_id));

	ret = true;
done:
	talloc_free(frame);
	return ret;
}