/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */
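/*
 * Editorial note on the record layout (inferred from the fetch and
 * traverse code later in this file, not part of the original source):
 * each brlock.tdb record is keyed by the file's struct file_id, and the
 * value is a flat array of struct lock_struct entries.
 */
#if 0	/* illustrative only - how a record decodes, cf. brl_traverse_fn() */
	TDB_DATA value = dbwrap_record_get_value(rec);
	struct lock_struct *locks = (struct lock_struct *)value.dptr;
	unsigned int num_locks = value.dsize / sizeof(struct lock_struct);
#endif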
#include "includes.h"
#include "system/filesys.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"
#include "util_tdb.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING
/* The open brlock.tdb database. */

static struct db_context *brlock_db;
struct byte_range_lock {
	struct files_struct *fsp;
	unsigned int num_locks;
	bool modified;
	bool read_only;
	struct file_id key;
	struct lock_struct *lock_data;
	struct db_record *record;
};
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/
static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
			i,
			(unsigned long long)pls->context.smblctx,
			(unsigned int)pls->context.tid,
			server_id_str(talloc_tos(), &pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %llu, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		(unsigned long long)pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}
unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
	return brl->num_locks;
}

struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
	return brl->fsp;
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/
static bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (serverid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/
static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
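/*
 * Editorial worked example with hypothetical values (not from the
 * original source): the ranges here are half-open intervals
 * [start, start + size), so adjacent locks do not overlap.
 */
#if 0	/* illustrative only, not compiled */
	struct lock_struct a = { .start = 0, .size = 10 };
	struct lock_struct b = { .start = 10, .size = 5 };
	/* b starts exactly where a ends: brl_overlap(&a, &b) is False. */
#endif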
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/
static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	   context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/
static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/
static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
		    lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/
static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
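/*
 * Editorial example with hypothetical values (not from the original
 * source): an unlock of [100, 150) touches a pending lock waiting on
 * [120, 130), so the callers below signal that waiter to retry via
 * MSG_SMB_UNLOCK.
 */
#if 0	/* illustrative only, not compiled */
	struct lock_struct unlock = { .start = 100, .size = 50 };
	struct lock_struct pending = { .start = 120, .size = 10 };
	/* brl_pending_overlap(&unlock, &pending) is True. */
#endif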
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/
static NTSTATUS brl_lock_failed(files_struct *fsp,
				const struct lock_struct *lock,
				bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
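/*
 * Editorial sketch of the behaviour implemented above (hypothetical
 * call sequence, not from the original source): a repeated failure on
 * the same pid/tid/fnum/start switches the error from
 * NT_STATUS_LOCK_NOT_GRANTED to NT_STATUS_FILE_LOCK_CONFLICT,
 * mimicking w2k3.
 */
#if 0	/* illustrative only, not compiled */
	status = brl_lock_failed(fsp, &lock, false); /* NT_STATUS_LOCK_NOT_GRANTED */
	status = brl_lock_failed(fsp, &lock, false); /* NT_STATUS_FILE_LOCK_CONFLICT */
#endif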
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/
void brl_init(bool read_only)
{
	int tdb_flags;

	if (brlock_db) {
		return;
	}

	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

	if (!lp_clustering()) {
		/*
		 * We can't use the SEQNUM trick to cache brlock
		 * entries in the clustering case because ctdb seqnum
		 * propagation has a delay.
		 */
		tdb_flags |= TDB_SEQNUM;
	}

	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
			    lp_open_files_db_hash_size(), tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			 lock_path("brlock.tdb")));
		return;
	}
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/
NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
		struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	if ((plock->start + plock->size - 1 < plock->start) &&
			plock->size != 0) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {
			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/
static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					     struct lock_struct *ex,		/* existing lock. */
					     struct lock_struct *plock)		/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                +---------+
                                |   ex    |
                                +---------+
                +-------+
                | plock |
                +-------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}
/*********************************************
        +---------------------------+
        |            ex             |
        +---------------------------+
        +---------------------------+
        |           plock           | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |           plock           | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}
/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |     plock     |
        +---------------+

BECOMES....
        +---------------+-------+
        |     plock     |  ex   | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |         plock         | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}
/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |     plock     |
                +---------------+

BECOMES....
        +-------+---------------+
        |  ex   |     plock     | - different lock types

OR.... (merge)
        +-----------------------+
        |         plock         | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}
/*********************************************
Overlap after.
        +-----------------------+
        |           ex          |
        +-----------------------+
        +---------------+
        |     plock     |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        |     plock     |
        +---------------+

BECOMES....
        +---------------+-------+
        |     plock     |  ex   | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |         plock         | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}
/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                |     plock     |
                +---------------+
OR
        +-------------+
        |  ex         |
        +-------------+
                +---------------+
                |     plock     |
                +---------------+

BECOMES....
        +-------+---------------+
        |  ex   |     plock     | - different lock types

OR.... (merge)
        +-----------------------+
        |         plock         | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}
/*********************************************
Complete overlap.
        +---------------------------+
        |             ex            |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        |  ex   |  plock  |   ex    | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |           plock           | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}
	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
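/*
 * Editorial worked example with hypothetical values (not from the
 * original source): the split case is why the callers below allocate
 * num_locks + 2 entries on lock and num_locks + 1 on unlock.
 */
#if 0	/* illustrative only, not compiled */
	/* ex = [100, 200) WRITE; plock = [140, 160) READ, same context.
	   Complete overlap with differing lock types returns 2:
	   lck_arr[0] = [100, 140), lck_arr[1] = [160, 200). */
	unsigned int n = brlock_posix_split_merge(lck_arr, &ex, &plock);
#endif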
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/
static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}
	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				TALLOC_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				TALLOC_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

				messaging_send(msg_ctx, pend_lock->context.pid,
						MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock,
				       struct blocking_lock_record *blr)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows_fn(handle, br_lck, plock,
						blocking_lock, blr);
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/
NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint64_t *psmblctx,
		struct blocking_lock_record *blr)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

#ifdef DEVELOPER
	/* Quieten valgrind on test. */
	ZERO_STRUCT(lock);
#endif

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
					       &lock, blocking_lock, blr);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/
bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
				struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK;	/* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (IS_PENDING_LOCK(lock->lock_type)) {
			continue;
		}

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}
	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
					MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/
static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}
	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
		    !brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				TALLOC_FREE(tp);
				return False;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}
	if (!overlap_found) {
		/* Just ignore - no change. */
		TALLOC_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		TALLOC_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;
	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
					MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}
bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck,
						  plock);
}
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/
bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
						  br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/
bool brl_locktest(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for %s file %s\n",
			   (double)start, (double)size, ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
/****************************************************************************
 Query for existing locks.
****************************************************************************/
NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint64_t *psmblctx,
		struct server_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for %s file %s\n",
			   (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     struct lock_struct *plock,
				     struct blocking_lock_record *blr)
{
	VFS_FIND(brl_cancel_windows);
	return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock, blr);
}
/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/
bool brl_lock_cancel(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav,
		struct blocking_lock_record *blr)
{
	bool ret;
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_flav = lock_flav;
	/* lock.lock_type doesn't matter */

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
						 &lock, blr);
	} else {
		ret = brl_lock_cancel_default(br_lck, &lock);
	}

	return ret;
}
bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
		struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;

	SMB_ASSERT(plock);

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    IS_PENDING_LOCK(lock->lock_type) &&
		    lock->lock_flav == plock->lock_flav &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}
/****************************************************************************
 Remove any locks associated with an open file.
****************************************************************************/
void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint32_t tid = fsp->conn->cnum;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
		if (!locks_copy) {
			smb_panic("brl_close_fnum: talloc failed");
		}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		if (lock->context.tid == tid && serverid_equal(&lock->context.pid, &pid) &&
				(lock->fnum == fnum)) {
			brl_unlock(msg_ctx,
				br_lck,
				lock->context.smblctx,
				pid,
				lock->start,
				lock->size,
				lock->lock_flav);
		}
	}
}
bool brl_mark_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx = fsp->op->global->open_persistent_id;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (!fsp->op->global->durable) {
		return false;
	}

	if (fsp->current_lock_count == 0) {
		return true;
	}

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle, we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != tid) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!serverid_equal(&lock->context.pid, &self)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != fnum) {
			TALLOC_FREE(br_lck);
			return false;
		}

		server_id_set_disconnected(&lock->context.pid);
		lock->context.tid = TID_FIELD_INVALID;
		lock->fnum = FNUM_FIELD_INVALID;
	}

	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx = fsp->op->global->open_persistent_id;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */
	fsp->lockdb_clean = true;

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/
static bool validate_lock_entries(TALLOC_CTX *mem_ctx,
				  unsigned int *pnum_entries, struct lock_struct **pplocks,
				  bool keep_disconnected)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;
	TALLOC_CTX *frame = talloc_stackframe();
	struct server_id *ids;
	bool *exists;

	ids = talloc_array(frame, struct server_id, *pnum_entries);
	if (ids == NULL) {
		DEBUG(0, ("validate_lock_entries: "
			  "talloc_array(struct server_id, %u) failed\n",
			  *pnum_entries));
		talloc_free(frame);
		return false;
	}

	exists = talloc_array(frame, bool, *pnum_entries);
	if (exists == NULL) {
		DEBUG(0, ("validate_lock_entries: "
			  "talloc_array(bool, %u) failed\n",
			  *pnum_entries));
		talloc_free(frame);
		return false;
	}

	for (i = 0; i < *pnum_entries; i++) {
		ids[i] = locks[i].context.pid;
	}

	if (!serverids_exist(ids, *pnum_entries, exists)) {
		DEBUG(3, ("validate_lock_entries: serverids_exist failed\n"));
		talloc_free(frame);
		return false;
	}

	for (i = 0; i < *pnum_entries; i++) {
		if (exists[i]) {
			num_valid_entries++;
			continue;
		}

		if (keep_disconnected &&
		    server_id_is_disconnected(&ids[i]))
		{
			num_valid_entries++;
			continue;
		}

		/* This process no longer exists - mark this
		   entry as invalid by zeroing it. */
		ZERO_STRUCTP(&locks[i]);
	}
	TALLOC_FREE(frame);

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = talloc_array(
				mem_ctx, struct lock_struct,
				num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smblctx &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		TALLOC_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/
static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	orig_num_locks = num_locks = value.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(talloc_tos(), &num_locks, &locks, true)) {
		TALLOC_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		if (num_locks) {
			TDB_DATA data;
			data.dptr = (uint8_t *)locks;
			data.dsize = num_locks*sizeof(struct lock_struct);
			dbwrap_record_store(rec, data, TDB_REPLACE);
		} else {
			dbwrap_record_delete(rec);
		}
	}

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
				locks[i].context.pid,
				locks[i].lock_type,
				locks[i].lock_flav,
				locks[i].start,
				locks[i].size,
				cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/
int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;
	NTSTATUS status;
	int count = 0;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	} else {
		return count;
	}
}
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/
static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	if (br_lck->read_only) {
		SMB_ASSERT(!br_lck->modified);
	}

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data;
		NTSTATUS status;

		data.dptr = (uint8 *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

 done:
	br_lck->read_only = true;
	br_lck->modified = false;

	TALLOC_FREE(br_lck->record);
}
static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/
static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
					files_struct *fsp, bool read_only)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck = talloc(mem_ctx, struct byte_range_lock);
	bool do_read_only = read_only;

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	br_lck->key = fsp->file_id;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct file_id);

	if (!fsp->lockdb_clean) {
		/* We must be read/write to clean
		   the dead entries. */
		do_read_only = false;
	}

	if (do_read_only) {
		NTSTATUS status;
		status = dbwrap_fetch(brlock_db, br_lck, key, &data);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(3, ("Could not fetch byte range lock record\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}
		br_lck->record = NULL;
	} else {
		br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

		if (br_lck->record == NULL) {
			DEBUG(3, ("Could not lock byte range lock entry\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		data = dbwrap_record_get_value(br_lck->record);
	}

	if ((data.dsize % sizeof(struct lock_struct)) != 0) {
		DEBUG(3, ("Got invalid brlock data\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	br_lck->read_only = do_read_only;
	br_lck->lock_data = NULL;

	talloc_set_destructor(br_lck, byte_range_lock_destructor);
	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (br_lck->num_locks != 0) {
		br_lck->lock_data = talloc_array(
			br_lck, struct lock_struct, br_lck->num_locks);
		if (br_lck->lock_data == NULL) {
			DEBUG(0, ("malloc failed\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		memcpy(br_lck->lock_data, data.dptr, data.dsize);
	}

	if (!fsp->lockdb_clean) {
		int orig_num_locks = br_lck->num_locks;

		/*
		 * This is the first time we access the byte range lock
		 * record with this fsp. Go through and ensure all entries
		 * are valid - remove any that don't.
		 * This makes the lockdb self cleaning at low cost.
		 *
		 * Note: Disconnected entries belong to disconnected
		 * durable handles. So at this point, we have a new
		 * handle on the file and the disconnected durable has
		 * already been closed (we are not a durable reconnect).
		 * So we need to clean the disconnected brl entry.
		 */

		if (!validate_lock_entries(br_lck, &br_lck->num_locks,
					   &br_lck->lock_data, false)) {
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Ensure invalid locks are cleaned up in the destructor. */
		if (orig_num_locks != br_lck->num_locks) {
			br_lck->modified = True;
		}

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}
	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
			br_lck->num_locks,
			file_id_string_tos(&fsp->file_id)));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	if (do_read_only != read_only) {
		/*
		 * this stores the record and gets rid of
		 * the write lock that is needed for a cleanup
		 */
		byte_range_lock_flush(br_lck);
	}

	return br_lck;
}
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
				      files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, False);
}
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock;

	if (lp_clustering()) {
		return brl_get_locks_internal(talloc_tos(), fsp, true);
	}

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		return fsp->brlock_rec;
	}

	TALLOC_FREE(fsp->brlock_rec);

	br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
	if (br_lock == NULL) {
		return NULL;
	}
	fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

	fsp->brlock_rec = talloc_move(fsp, &br_lock);

	return fsp->brlock_rec;
}
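/*
 * Editorial note (grounded in brl_init() and the caching code above,
 * not from the original source): outside clustering, brlock.tdb is
 * opened with TDB_SEQNUM, so any write bumps the database sequence
 * number. The cached read-only record on the fsp is therefore valid
 * exactly as long as the seqnum is unchanged.
 */
#if 0	/* illustrative only, not compiled */
	struct byte_range_lock *cached = brl_get_locks_readonly(fsp);
	/* ... another smbd writes to brlock.tdb, the seqnum changes ... */
	cached = brl_get_locks_readonly(fsp);	/* cache miss: re-fetched */
#endif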
struct brl_revalidate_state {
	ssize_t array_size;
	uint32_t num_pids;
	struct server_id *pids;
};
/*
 * Collect PIDs of all processes with pending entries
 */
static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}
/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (const struct server_id *)p1;
	const struct server_id *i2 = (const struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}
/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */
void brl_revalidate(struct messaging_context *msg_ctx,
		    void *private_data,
		    uint32_t msg_type,
		    struct server_id server_id,
		    DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32_t i;
	struct server_id last_pid;

	if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (serverid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

 done:
	TALLOC_FREE(state);
	return;
}
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	uint32_t num, n;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DEBUG(5, ("brl_cleanup_disconnected: failed to fetch record "
			  "for file %s\n", file_id_string(frame, &fid)));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct*)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DEBUG(10, ("brl_cleanup_disconnected: no byte range locks for "
			   "file %s\n", file_id_string(frame, &fid)));
		ret = true;
		goto done;
	}

	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
				  "%s used by server %s, do not cleanup\n",
				  file_id_string(frame, &fid),
				  server_id_str(frame, &ctx->pid)));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
				  "%s expected smblctx %llu but found %llu"
				  ", do not cleanup\n",
				  file_id_string(frame, &fid),
				  (unsigned long long)open_persistent_id,
				  (unsigned long long)ctx->smblctx));
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("brl_cleanup_disconnected: failed to delete record "
			  "for file %s from %s, open %llu: %s\n",
			  file_id_string(frame, &fid), dbwrap_name(brlock_db),
			  (unsigned long long)open_persistent_id,
			  nt_errstr(status)));
		goto done;
	}

	DEBUG(10, ("brl_cleanup_disconnected: "
		   "file %s cleaned up %u entries from open %llu\n",
		   file_id_string(frame, &fid), num,
		   (unsigned long long)open_persistent_id));

	ret = true;
done:
	talloc_free(frame);
	return ret;
}