/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %s, ",
			i,
			(unsigned int)pls->context.smbpid,
			(unsigned int)pls->context.tid,
			procid_str_static(&pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

BOOL brl_same_context(const struct lock_context *ctx1,
			const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smbpid == ctx2->smbpid) &&
		(ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static BOOL brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static BOOL brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static BOOL brl_conflict_posix(const struct lock_struct *lck1,
				const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context can't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
static BOOL brl_conflict1(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
				lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static BOOL brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, BOOL blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(int read_only)
{
	if (brlock_db) {
		return;
	}
	brlock_db = db_open(NULL, lock_path("brlock.tdb"), 0,
			TDB_DEFAULT
			|(read_only?0x0:TDB_CLEAR_IF_FIRST),
			read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
	if (!brlock_db) {
		return;
	}
	TALLOC_FREE(brlock_db);
}
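/* Illustrative only: a minimal sketch of how a server process might pair
   brl_init()/brl_shutdown() around its main loop. The example_main() wrapper
   below is hypothetical and not part of this module, so it is kept inside
   #if 0 and never compiled. */
#if 0
static void example_main(void)
{
	brl_init(False);	/* open brlock.tdb read/write */

	/* ... serve SMB requests; the locking layer calls brl_lock(),
	   brl_unlock() etc. while the database stays open ... */

	brl_shutdown(False);	/* drop our reference to the database */
}
#endif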
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
			struct lock_struct *plock, BOOL blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smbpid = locks[i].context.smbpid;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;

		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smbpid = 0xFFFFFFFF;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				return map_nt_error_from_unix(errno_ret);
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		return NT_STATUS_NO_MEMORY;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
						const struct lock_struct *ex,		/* existing lock. */
						const struct lock_struct *plock,	/* proposed lock. */
						BOOL *lock_was_added)
{
	BOOL lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
			(plock->start > (ex->start + ex->size))) {
		/* No overlap with this lock - copy existing. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {
		memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
		*lock_was_added = True;
		return 1;
	}

/*********************************************
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR....
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   ex                  | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
			(ex->start <= plock->start + plock->size) &&
			(ex->start + ex->size > plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock before the old. */

		if (lock_types_differ) {
			/* Add new. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			/* Set new start and size. */
			lck_arr[0].start = plock->start;
			lck_arr[0].size = (ex->start + ex->size) - plock->start;
			return 1;
		}
	}

/*********************************************
   +-----------------------+
   |  ex                   |
   +-----------------------+
           +---------------+
           |   plock       |
           +---------------+
OR....
   +-------+
   |  ex   |
   +-------+
           +---------------+
           |   plock       |
           +---------------+

BECOMES....
   +-------+---------------+
   | ex    |   plock       | - different lock types
   +-------+---------------+
OR.... (merge)
   +-----------------------+
   | ex                    | - same lock type.
   +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock after the old. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = (plock->start + plock->size) - ex->start;
			return 1;
		}
	}

/*********************************************
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        ex                 | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
		*lock_was_added = True;

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[2].start = plock->start + plock->size;
			lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 3;
		} else {
			/* Just eat plock. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
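/* Illustrative only: a worked example of the split case above, assuming an
   existing WRITE lock covering [100, 200) and a proposed READ lock covering
   [140, 150) on the same context. The values are made up and the sketch is
   kept inside #if 0 so it is never compiled. */
#if 0
static void example_split(void)
{
	struct lock_struct ex, plock, out[3];
	BOOL added = False;
	unsigned int n;

	ZERO_STRUCT(ex);
	ex.start = 100;
	ex.size = 100;
	ex.lock_type = WRITE_LOCK;

	plock = ex;			/* same context... */
	plock.start = 140;
	plock.size = 10;
	plock.lock_type = READ_LOCK;	/* ...different lock type. */

	n = brlock_posix_split_merge(out, &ex, &plock, &added);

	/* n == 3 and added == True: out[0] = WRITE [100,140),
	   out[1] = READ [140,150), out[2] = WRITE [150,200). */
}
#endif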
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			struct byte_range_lock *br_lck,
			struct lock_struct *plock)
{
	unsigned int i, count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	BOOL lock_was_added = False;
	BOOL signal_pending_read = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = 0;
	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smbpid = curr_lock->context.smbpid;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smbpid = curr_lock->context.smbpid;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			count += brlock_posix_split_merge(&tp[count], curr_lock, plock, &lock_was_added);
		}
	}

	if (!lock_was_added) {
		memcpy(&tp[count], plock, sizeof(struct lock_struct));
		count++;
	}

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smbpid = 0xFFFFFFFF;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				SAFE_FREE(tp);
				return map_nt_error_from_unix(errno_ret);
			}
		}
	}

	/* Realloc so we don't leak entries per lock call. */
	tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

				messaging_send(msg_ctx, pend_lock->context.pid,
						MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		BOOL blocking_lock,
		uint32 *psmbpid)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = brl_lock_windows(br_lck, &lock, blocking_lock);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmbpid) {
		*psmbpid = lock.context.smbpid;
	}
	return ret;
}
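/* Illustrative only: a sketch of how a caller in the locking layer might
   request a Windows-flavour write lock. The variables (msg_ctx, fsp, smbpid,
   offset, count) are assumed to be supplied by the caller and are not
   defined here; kept inside #if 0. */
#if 0
	{
		uint32 blocker_smbpid;
		NTSTATUS status;
		struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);

		if (br_lck) {
			status = brl_lock(msg_ctx, br_lck,
					smbpid, procid_self(),
					offset, count,
					WRITE_LOCK, WINDOWS_LOCK,
					False,		/* not a blocking lock */
					&blocker_smbpid);
			TALLOC_FREE(br_lck);	/* destructor stores + unlocks */
		}
	}
#endif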
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

static BOOL brl_unlock_windows(struct messaging_context *msg_ctx,
			struct byte_range_lock *br_lck,
			const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
				lock->fnum == plock->fnum &&
				lock->lock_flav == WINDOWS_LOCK &&
				lock->start == plock->start &&
				lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
					MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static BOOL brl_unlock_posix(struct messaging_context *msg_ctx,
			struct byte_range_lock *br_lck,
			const struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	BOOL overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		struct lock_struct tmp_lock[3];
		BOOL lock_was_added = False;
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
				!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

		if (tmp_count == 1) {
			/* Either the locks didn't overlap, or the unlock completely
			   overlapped this lock. If it didn't overlap, then there's
			   no change in the locks. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				/* No change in this lock. */
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				count++;
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				overlap_found = True;
			}
			continue;
		} else if (tmp_count == 2) {
			/* The unlock overlapped an existing lock. Copy the truncated
			   lock into the lock array. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				if (tmp_lock[0].size != locks[i].size) {
					overlap_found = True;
				}
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
				memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
				if (tmp_lock[1].start != locks[i].start) {
					overlap_found = True;
				}
			}
			count++;
			continue;
		}

		/* tmp_count == 3 - (we split a lock range in two). */
		SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
		SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
		SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

		memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
		count++;
		memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
		count++;

		overlap_found = True;
		/* Optimisation... */
		/* We know we're finished here as we can't overlap any
		   more POSIX locks. Copy the rest of the lock array. */
		if (i < br_lck->num_locks - 1) {
			memcpy(&tp[count], &locks[i+1],
				sizeof(*locks)*((br_lck->num_locks-1) - i));
			count += ((br_lck->num_locks-1) - i);
		}
		break;
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				&plock->context,
				tp,
				count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
					MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

BOOL brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return brl_unlock_windows(msg_ctx, br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}
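/* Illustrative only: the matching unlock for the sketch after brl_lock(),
   with the same assumed caller-supplied variables (msg_ctx, fsp, smbpid,
   offset, count); kept inside #if 0. */
#if 0
	{
		struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);

		if (br_lck) {
			BOOL ok = brl_unlock(msg_ctx, br_lck,
					smbpid, procid_self(),
					offset, count, WINDOWS_LOCK);
			TALLOC_FREE(br_lck);
			/* ok == False means no matching lock was found. */
		}
	}
#endif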
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

BOOL brl_locktest(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	BOOL ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
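/* Illustrative only: probing whether a READ lock could be granted without
   actually taking it, as a lock-test code path might. Caller-supplied
   variables (fsp, smbpid, offset, count) are assumed; kept inside #if 0. */
#if 0
	{
		struct byte_range_lock *br_lck = brl_get_locks_readonly(NULL, fsp);

		if (br_lck) {
			BOOL can_lock = brl_locktest(br_lck, smbpid, procid_self(),
					offset, count, READ_LOCK, WINDOWS_LOCK);
			TALLOC_FREE(br_lck);
			/* can_lock == True means the region is currently unlocked. */
		}
	}
#endif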
/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint32 *psmbpid,
		struct server_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = *psmbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		BOOL conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmbpid = exlock->context.smbpid;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		if (ret) {
			/* Hmmm. No clue what to set smbpid to - use -1. */
			*psmbpid = 0xFFFF;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
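/* Illustrative only: an F_GETLK-style query. On NT_STATUS_LOCK_NOT_GRANTED
   the in/out parameters are overwritten with the conflicting lock's details.
   The variables (br_lck, smbpid, offset, count) are assumed from the caller;
   kept inside #if 0. */
#if 0
	{
		uint32 qpid = smbpid;
		br_off qstart = offset;
		br_off qsize = count;
		enum brl_type qtype = WRITE_LOCK;
		NTSTATUS status;

		status = brl_lockquery(br_lck, &qpid, procid_self(),
				&qstart, &qsize, &qtype, POSIX_LOCK);
		/* On NT_STATUS_LOCK_NOT_GRANTED, qpid/qstart/qsize/qtype
		   now describe the blocking lock. */
	}
#endif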
/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

BOOL brl_lock_cancel(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_context context;

	context.smbpid = smbpid;
	context.pid = pid;
	context.tid = br_lck->fsp->conn->cnum;

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &context) &&
				lock->fnum == br_lck->fsp->fnum &&
				IS_PENDING_LOCK(lock->lock_type) &&
				lock->lock_flav == lock_flav &&
				lock->start == start &&
				lock->size == size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}
/****************************************************************************
 Remove any locks associated with an open file.
 If this process owns other Windows locks on the same dev/ino pair we
 unlock those individually so the underlying system POSIX locks are
 released correctly; otherwise we can bulk delete.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i, j, dcount=0;
	int num_deleted_windows_locks = 0;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = procid_self();
	BOOL unlock_individually = False;

	if(lp_posix_locking(fsp->conn->params)) {

		/* Check if there are any Windows locks associated with this dev/ino
		   pair that are not this fnum. If so we need to call unlock on each
		   one in order to release the system POSIX locks correctly. */

		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *lock = &locks[i];

			if (!procid_equal(&lock->context.pid, &pid)) {
				continue;
			}

			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
				continue; /* Ignore pending. */
			}

			if (lock->context.tid != tid || lock->fnum != fnum) {
				unlock_individually = True;
				break;
			}
		}

		if (unlock_individually) {
			struct lock_struct *locks_copy;
			unsigned int num_locks_copy;

			/* Copy the current lock array. */
			if (br_lck->num_locks) {
				locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
				if (!locks_copy) {
					smb_panic("brl_close_fnum: talloc failed");
				}
			} else {
				locks_copy = NULL;
			}

			num_locks_copy = br_lck->num_locks;

			for (i=0; i < num_locks_copy; i++) {
				struct lock_struct *lock = &locks_copy[i];

				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
						(lock->fnum == fnum)) {
					brl_unlock(msg_ctx,
						br_lck,
						lock->context.smbpid,
						pid,
						lock->start,
						lock->size,
						lock->lock_flav);
				}
			}
			return;
		}
	}

	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */

	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		BOOL del_this_lock = False;

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
				del_this_lock = True;
				num_deleted_windows_locks++;
			} else if (lock->lock_flav == POSIX_LOCK) {
				del_this_lock = True;
			}
		}

		if (del_this_lock) {
			/* Send unlock messages to any pending waiters that overlap. */
			for (j=0; j < br_lck->num_locks; j++) {
				struct lock_struct *pend_lock = &locks[j];

				/* Ignore our own or non-pending locks. */
				if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
					continue;
				}

				/* Optimisation - don't send to this fnum as we're
				   closing it. */
				if (pend_lock->context.tid == tid &&
				    procid_equal(&pend_lock->context.pid, &pid) &&
				    pend_lock->fnum == fnum) {
					continue;
				}

				/* We could send specific lock info here... */
				if (brl_pending_overlap(lock, pend_lock)) {
					messaging_send(msg_ctx, pend_lock->context.pid,
							MSG_SMB_UNLOCK, &data_blob_null);
				}
			}

			/* found it - delete it */
			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
				memmove(&locks[i], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
			}
			br_lck->num_locks--;
			br_lck->modified = True;
			i--;
			dcount++;
		}
	}

	if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
		/* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
		reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
	}
}
/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/

static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;

	for (i = 0; i < *pnum_entries; i++) {
		struct lock_struct *lock_data = &locks[i];
		if (!process_exists(lock_data->context.pid)) {
			/* This process no longer exists - mark this
			   entry as invalid by zeroing it. */
			ZERO_STRUCTP(lock_data);
		} else {
			num_valid_entries++;
		}
	}

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smbpid &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		SAFE_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)memdup(rec->value.dptr,
					     rec->value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)rec->key.dptr;
	orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(&num_locks, &locks)) {
		SAFE_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		if (num_locks) {
			TDB_DATA data;
			data.dptr = (uint8_t *)locks;
			data.dsize = num_locks*sizeof(struct lock_struct);
			rec->store(rec, data, TDB_REPLACE);
		} else {
			rec->delete_rec(rec);
		}
	}

	for ( i=0; i<num_locks; i++) {
		cb->fn(*key,
		       locks[i].context.pid,
		       locks[i].lock_type,
		       locks[i].lock_flav,
		       locks[i].start,
		       locks[i].size,
		       cb->private_data);
	}

	SAFE_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;

	if (brlock_db == NULL) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	return brlock_db->traverse(brlock_db, traverse_fn, &cb);
}
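/* Illustrative only: an smbstatus-style dump of every byte range lock in the
   database, using a hypothetical callback print_brl(); kept inside #if 0. */
#if 0
static void print_brl(struct file_id id, struct server_id pid,
		      enum brl_type lock_type, enum brl_flavour lock_flav,
		      br_off start, br_off size, void *private_data)
{
	d_printf("%s %s %s start=%.0f size=%.0f\n",
		 procid_str_static(&pid),
		 lock_type_name(lock_type),
		 lock_flav_name(lock_flav),
		 (double)start, (double)size);
}

static void example_dump_brl(void)
{
	brl_forall(print_brl, NULL);
}
#endif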
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	TDB_DATA key;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct file_id);

	if (br_lck->read_only) {
		SMB_ASSERT(!br_lck->modified);
	}

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data;
		NTSTATUS status;

		data.dptr = (uint8 *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		status = br_lck->record->store(br_lck->record, data,
					       TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range lock entry");
		}
	}

 done:

	SAFE_FREE(br_lck->lock_data);
	TALLOC_FREE(br_lck->record);
	return 0;
}
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
					files_struct *fsp, BOOL read_only)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	memset(&br_lck->key, '\0', sizeof(struct file_id));
	br_lck->key = fsp->file_id;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct file_id);

	if (!fsp->lockdb_clean) {
		/* We must be read/write to clean
		   the dead entries. */
		read_only = False;
	}

	if (read_only) {
		if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
			DEBUG(3, ("Could not fetch byte range lock record\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}
		br_lck->record = NULL;
	} else {
		br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);

		if (br_lck->record == NULL) {
			DEBUG(3, ("Could not lock byte range lock entry\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		data = br_lck->record->value;
	}

	br_lck->read_only = read_only;
	br_lck->lock_data = NULL;

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (br_lck->num_locks != 0) {
		br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
						     br_lck->num_locks);
		if (br_lck->lock_data == NULL) {
			DEBUG(0, ("malloc failed\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		memcpy(br_lck->lock_data, data.dptr, data.dsize);
	}

	if (!fsp->lockdb_clean) {
		int orig_num_locks = br_lck->num_locks;

		/* This is the first time we've accessed this. */
		/* Go through and ensure all entries exist - remove any that don't. */
		/* Makes the lockdb self cleaning at low cost. */

		if (!validate_lock_entries(&br_lck->num_locks,
					   &br_lck->lock_data)) {
			SAFE_FREE(br_lck->lock_data);
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Ensure invalid locks are cleaned up in the destructor. */
		if (orig_num_locks != br_lck->num_locks) {
			br_lck->modified = True;
		}

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
			br_lck->num_locks,
			file_id_string_tos(&fsp->file_id)));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}
	return br_lck;
}
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, False);
}

struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, True);
}
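/* Illustrative only: the intended fetch-modify-free lifecycle. The database
   record stays locked from brl_get_locks() until TALLOC_FREE(), whose
   destructor writes back any modifications and drops the record lock.
   The fsp variable is assumed from the caller; kept inside #if 0. */
#if 0
	{
		struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);

		if (br_lck != NULL) {
			/* ... call brl_lock()/brl_unlock() on br_lck ... */
			TALLOC_FREE(br_lck);	/* store + unlock via destructor */
		}
	}
#endif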
struct brl_revalidate_state {
	ssize_t array_size;
	uint32 num_pids;
	struct server_id *pids;
};

/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}
/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (const struct server_id *)p1;
	const struct server_id *i2 = (const struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}
/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

static void brl_revalidate(struct messaging_context *msg_ctx,
			   void *private_data,
			   uint32_t msg_type,
			   struct server_id server_id,
			   DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32 i;
	struct server_id last_pid;

	if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	qsort(state->pids, state->num_pids, sizeof(state->pids[0]),
	      compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (procid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

 done:
	TALLOC_FREE(state);
	return;
}
void brl_register_msgs(struct messaging_context *msg_ctx)
{
	messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
			   brl_revalidate);
}