2 Unix SMB/CIFS implementation.
4 Copyright (C) Jeremy Allison 1992-2006
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 POSIX locking support. Jeremy Allison (jeremy@valinux.com), Apr. 2000.
25 #include "system/filesys.h"
26 #include "locking/proto.h"
27 #include "dbwrap/dbwrap.h"
28 #include "dbwrap/dbwrap_rbt.h"
32 #define DBGC_CLASS DBGC_LOCKING
35 * The pending close database handle.
38 static struct db_context *posix_pending_close_db;
40 /****************************************************************************
41 First - the functions that deal with the underlying system locks - these
42 functions are used no matter if we're mapping CIFS Windows locks or CIFS
43 POSIX locks onto POSIX.
44 ****************************************************************************/
46 /****************************************************************************
47 Utility function to map a lock type correctly depending on the open
49 ****************************************************************************/
51 static int map_posix_lock_type( files_struct *fsp, enum brl_type lock_type)
53 if((lock_type == WRITE_LOCK) && !fsp->can_write) {
55 * Many UNIX's cannot get a write lock on a file opened read-only.
56 * Win32 locking semantics allow this.
57 * Do the best we can and attempt a read-only lock.
59 DEBUG(10,("map_posix_lock_type: Downgrading write lock to read due to read-only file.\n"));
64 * This return should be the most normal, as we attempt
65 * to always open files read/write.
68 return (lock_type == READ_LOCK) ? F_RDLCK : F_WRLCK;
71 /****************************************************************************
73 ****************************************************************************/
75 static const char *posix_lock_type_name(int lock_type)
77 return (lock_type == F_RDLCK) ? "READ" : "WRITE";
80 /****************************************************************************
81 Check to see if the given unsigned lock range is within the possible POSIX
82 range. Modifies the given args to be in range if possible, just returns
84 ****************************************************************************/
86 static bool posix_lock_in_range(off_t *offset_out, off_t *count_out,
87 uint64_t u_offset, uint64_t u_count)
89 off_t offset = (off_t)u_offset;
90 off_t count = (off_t)u_count;
93 * For the type of system we are, attempt to
94 * find the maximum positive lock offset as an off_t.
97 #if defined(MAX_POSITIVE_LOCK_OFFSET) /* Some systems have arbitrary limits. */
99 off_t max_positive_lock_offset = (MAX_POSITIVE_LOCK_OFFSET);
102 * In this case off_t is 64 bits,
103 * and the underlying system can handle 64 bit signed locks.
106 off_t mask2 = ((off_t)0x4) << (SMB_OFF_T_BITS-4);
107 off_t mask = (mask2<<1);
108 off_t max_positive_lock_offset = ~mask;
112 * POSIX locks of length zero mean lock to end-of-file.
113 * Win32 locks of length zero are point probes. Ignore
114 * any Win32 locks of length zero. JRA.
117 if (count == (off_t)0) {
118 DEBUG(10,("posix_lock_in_range: count = 0, ignoring.\n"));
123 * If the given offset was > max_positive_lock_offset then we cannot map this at all
127 if (u_offset & ~((uint64_t)max_positive_lock_offset)) {
128 DEBUG(10,("posix_lock_in_range: (offset = %.0f) offset > %.0f and we cannot handle this. Ignoring lock.\n",
129 (double)u_offset, (double)((uint64_t)max_positive_lock_offset) ));
134 * We must truncate the count to less than max_positive_lock_offset.
137 if (u_count & ~((uint64_t)max_positive_lock_offset)) {
138 count = max_positive_lock_offset;
142 * Truncate count to end at max lock offset.
145 if (offset + count < 0 || offset + count > max_positive_lock_offset) {
146 count = max_positive_lock_offset - offset;
150 * If we ate all the count, ignore this lock.
154 DEBUG(10,("posix_lock_in_range: Count = 0. Ignoring lock u_offset = %.0f, u_count = %.0f\n",
155 (double)u_offset, (double)u_count ));
160 * The mapping was successful.
163 DEBUG(10,("posix_lock_in_range: offset_out = %.0f, count_out = %.0f\n",
164 (double)offset, (double)count ));
166 *offset_out = offset;
172 bool smb_vfs_call_lock(struct vfs_handle_struct *handle,
173 struct files_struct *fsp, int op, off_t offset,
174 off_t count, int type)
177 return handle->fns->lock_fn(handle, fsp, op, offset, count, type);
180 /****************************************************************************
181 Actual function that does POSIX locks. Copes with 64 -> 32 bit cruft and
182 broken NFS implementations.
183 ****************************************************************************/
185 static bool posix_fcntl_lock(files_struct *fsp, int op, off_t offset, off_t count, int type)
189 DEBUG(8,("posix_fcntl_lock %d %d %.0f %.0f %d\n",fsp->fh->fd,op,(double)offset,(double)count,type));
191 ret = SMB_VFS_LOCK(fsp, op, offset, count, type);
193 if (!ret && ((errno == EFBIG) || (errno == ENOLCK) || (errno == EINVAL))) {
195 DEBUG(0,("posix_fcntl_lock: WARNING: lock request at offset %.0f, length %.0f returned\n",
196 (double)offset,(double)count));
197 DEBUGADD(0,("an %s error. This can happen when using 64 bit lock offsets\n", strerror(errno)));
198 DEBUGADD(0,("on 32 bit NFS mounted file systems.\n"));
201 * If the offset is > 0x7FFFFFFF then this will cause problems on
202 * 32 bit NFS mounted filesystems. Just ignore it.
205 if (offset & ~((off_t)0x7fffffff)) {
206 DEBUG(0,("Offset greater than 31 bits. Returning success.\n"));
210 if (count & ~((off_t)0x7fffffff)) {
211 /* 32 bit NFS file system, retry with smaller offset */
212 DEBUG(0,("Count greater than 31 bits - retrying with 31 bit truncated length.\n"));
215 ret = SMB_VFS_LOCK(fsp, op, offset, count, type);
219 DEBUG(8,("posix_fcntl_lock: Lock call %s\n", ret ? "successful" : "failed"));
223 bool smb_vfs_call_getlock(struct vfs_handle_struct *handle,
224 struct files_struct *fsp, off_t *poffset,
225 off_t *pcount, int *ptype, pid_t *ppid)
228 return handle->fns->getlock_fn(handle, fsp, poffset, pcount, ptype,
232 /****************************************************************************
233 Actual function that gets POSIX locks. Copes with 64 -> 32 bit cruft and
234 broken NFS implementations.
235 ****************************************************************************/
237 static bool posix_fcntl_getlock(files_struct *fsp, off_t *poffset, off_t *pcount, int *ptype)
242 DEBUG(8,("posix_fcntl_getlock %d %.0f %.0f %d\n",
243 fsp->fh->fd,(double)*poffset,(double)*pcount,*ptype));
245 ret = SMB_VFS_GETLOCK(fsp, poffset, pcount, ptype, &pid);
247 if (!ret && ((errno == EFBIG) || (errno == ENOLCK) || (errno == EINVAL))) {
249 DEBUG(0,("posix_fcntl_getlock: WARNING: lock request at offset %.0f, length %.0f returned\n",
250 (double)*poffset,(double)*pcount));
251 DEBUGADD(0,("an %s error. This can happen when using 64 bit lock offsets\n", strerror(errno)));
252 DEBUGADD(0,("on 32 bit NFS mounted file systems.\n"));
255 * If the offset is > 0x7FFFFFFF then this will cause problems on
256 * 32 bit NFS mounted filesystems. Just ignore it.
259 if (*poffset & ~((off_t)0x7fffffff)) {
260 DEBUG(0,("Offset greater than 31 bits. Returning success.\n"));
264 if (*pcount & ~((off_t)0x7fffffff)) {
265 /* 32 bit NFS file system, retry with smaller offset */
266 DEBUG(0,("Count greater than 31 bits - retrying with 31 bit truncated length.\n"));
268 *pcount &= 0x7fffffff;
269 ret = SMB_VFS_GETLOCK(fsp,poffset,pcount,ptype,&pid);
273 DEBUG(8,("posix_fcntl_getlock: Lock query call %s\n", ret ? "successful" : "failed"));
277 /****************************************************************************
278 POSIX function to see if a file region is locked. Returns True if the
279 region is locked, False otherwise.
280 ****************************************************************************/
282 bool is_posix_locked(files_struct *fsp,
285 enum brl_type *plock_type,
286 enum brl_flavour lock_flav)
290 int posix_lock_type = map_posix_lock_type(fsp,*plock_type);
292 DEBUG(10,("is_posix_locked: File %s, offset = %.0f, count = %.0f, "
293 "type = %s\n", fsp_str_dbg(fsp), (double)*pu_offset,
294 (double)*pu_count, posix_lock_type_name(*plock_type)));
297 * If the requested lock won't fit in the POSIX range, we will
298 * never set it, so presume it is not locked.
301 if(!posix_lock_in_range(&offset, &count, *pu_offset, *pu_count)) {
305 if (!posix_fcntl_getlock(fsp,&offset,&count,&posix_lock_type)) {
309 if (posix_lock_type == F_UNLCK) {
313 if (lock_flav == POSIX_LOCK) {
314 /* Only POSIX lock queries need to know the details. */
315 *pu_offset = (uint64_t)offset;
316 *pu_count = (uint64_t)count;
317 *plock_type = (posix_lock_type == F_RDLCK) ? READ_LOCK : WRITE_LOCK;
322 /****************************************************************************
323 Next - the functions that deal with in memory database storing representations
324 of either Windows CIFS locks or POSIX CIFS locks.
325 ****************************************************************************/
327 /* The key used in the in-memory POSIX databases. */
329 struct lock_ref_count_key {
334 /*******************************************************************
335 Form a static locking key for a dev/inode pair for the lock ref count
336 ******************************************************************/
338 static TDB_DATA locking_ref_count_key_fsp(files_struct *fsp,
339 struct lock_ref_count_key *tmp)
342 tmp->id = fsp->file_id;
344 return make_tdb_data((uint8_t *)tmp, sizeof(*tmp));
347 /*******************************************************************
348 Convenience function to get an fd_array key from an fsp.
349 ******************************************************************/
351 static TDB_DATA fd_array_key_fsp(files_struct *fsp)
353 return make_tdb_data((uint8 *)&fsp->file_id, sizeof(fsp->file_id));
356 /*******************************************************************
357 Create the in-memory POSIX lock databases.
358 ********************************************************************/
360 bool posix_locking_init(bool read_only)
362 if (posix_pending_close_db != NULL) {
366 posix_pending_close_db = db_open_rbt(NULL);
368 if (posix_pending_close_db == NULL) {
369 DEBUG(0,("Failed to open POSIX pending close database.\n"));
376 /*******************************************************************
377 Delete the in-memory POSIX lock databases.
378 ********************************************************************/
380 bool posix_locking_end(void)
383 * Shouldn't we close all fd's here?
385 TALLOC_FREE(posix_pending_close_db);
389 /****************************************************************************
390 Next - the functions that deal with storing fd's that have outstanding
391 POSIX locks when closed.
392 ****************************************************************************/
394 /****************************************************************************
395 The records in posix_pending_close_tdb are composed of an array of ints
396 keyed by dev/ino pair.
397 The first int is a reference count of the number of outstanding locks on
398 all open fd's on this dev/ino pair. Any subsequent ints are the fd's that
399 were open on this dev/ino pair that should have been closed, but can't as
400 the lock ref count is non zero.
401 ****************************************************************************/
403 /****************************************************************************
404 Keep a reference count of the number of Windows locks open on this dev/ino
405 pair. Creates entry if it doesn't exist.
406 ****************************************************************************/
408 static void increment_windows_lock_ref_count(files_struct *fsp)
410 struct lock_ref_count_key tmp;
411 struct db_record *rec;
412 int lock_ref_count = 0;
416 rec = dbwrap_fetch_locked(
417 posix_pending_close_db, talloc_tos(),
418 locking_ref_count_key_fsp(fsp, &tmp));
420 SMB_ASSERT(rec != NULL);
422 value = dbwrap_record_get_value(rec);
424 if (value.dptr != NULL) {
425 SMB_ASSERT(value.dsize == sizeof(lock_ref_count));
426 memcpy(&lock_ref_count, value.dptr,
427 sizeof(lock_ref_count));
432 status = dbwrap_record_store(rec,
433 make_tdb_data((uint8 *)&lock_ref_count,
434 sizeof(lock_ref_count)), 0);
436 SMB_ASSERT(NT_STATUS_IS_OK(status));
440 DEBUG(10,("increment_windows_lock_ref_count for file now %s = %d\n",
441 fsp_str_dbg(fsp), lock_ref_count));
444 /****************************************************************************
445 Bulk delete - subtract as many locks as we've just deleted.
446 ****************************************************************************/
448 void reduce_windows_lock_ref_count(files_struct *fsp, unsigned int dcount)
450 struct lock_ref_count_key tmp;
451 struct db_record *rec;
452 int lock_ref_count = 0;
456 rec = dbwrap_fetch_locked(
457 posix_pending_close_db, talloc_tos(),
458 locking_ref_count_key_fsp(fsp, &tmp));
460 value = dbwrap_record_get_value(rec);
462 SMB_ASSERT((rec != NULL)
463 && (value.dptr != NULL)
464 && (value.dsize == sizeof(lock_ref_count)));
466 memcpy(&lock_ref_count, value.dptr, sizeof(lock_ref_count));
468 SMB_ASSERT(lock_ref_count > 0);
470 lock_ref_count -= dcount;
472 status = dbwrap_record_store(rec,
473 make_tdb_data((uint8 *)&lock_ref_count,
474 sizeof(lock_ref_count)), 0);
476 SMB_ASSERT(NT_STATUS_IS_OK(status));
480 DEBUG(10,("reduce_windows_lock_ref_count for file now %s = %d\n",
481 fsp_str_dbg(fsp), lock_ref_count));
484 static void decrement_windows_lock_ref_count(files_struct *fsp)
486 reduce_windows_lock_ref_count(fsp, 1);
489 /****************************************************************************
490 Fetch the lock ref count.
491 ****************************************************************************/
493 static int get_windows_lock_ref_count(files_struct *fsp)
495 struct lock_ref_count_key tmp;
498 int lock_ref_count = 0;
500 status = dbwrap_fetch(
501 posix_pending_close_db, talloc_tos(),
502 locking_ref_count_key_fsp(fsp, &tmp), &dbuf);
504 if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
508 if (!NT_STATUS_IS_OK(status)) {
509 DEBUG(0, ("get_windows_lock_ref_count: Error fetching "
510 "lock ref count for file %s: %s\n",
511 fsp_str_dbg(fsp), nt_errstr(status)));
515 if (dbuf.dsize != sizeof(lock_ref_count)) {
516 DEBUG(0, ("get_windows_lock_ref_count: invalid entry "
517 "in lock ref count record for file %s: "
518 "(invalid data size %u)\n",
519 fsp_str_dbg(fsp), (unsigned int)dbuf.dsize));
523 memcpy(&lock_ref_count, dbuf.dptr, sizeof(lock_ref_count));
524 TALLOC_FREE(dbuf.dptr);
527 DEBUG(10,("get_windows_lock_count for file %s = %d\n",
528 fsp_str_dbg(fsp), lock_ref_count));
530 return lock_ref_count;
533 /****************************************************************************
534 Delete a lock_ref_count entry.
535 ****************************************************************************/
537 static void delete_windows_lock_ref_count(files_struct *fsp)
539 struct lock_ref_count_key tmp;
540 struct db_record *rec;
542 rec = dbwrap_fetch_locked(
543 posix_pending_close_db, talloc_tos(),
544 locking_ref_count_key_fsp(fsp, &tmp));
546 SMB_ASSERT(rec != NULL);
548 /* Not a bug if it doesn't exist - no locks were ever granted. */
550 dbwrap_record_delete(rec);
553 DEBUG(10,("delete_windows_lock_ref_count for file %s\n",
557 /****************************************************************************
558 Add an fd to the pending close tdb.
559 ****************************************************************************/
561 static void add_fd_to_close_entry(files_struct *fsp)
563 struct db_record *rec;
568 rec = dbwrap_fetch_locked(
569 posix_pending_close_db, talloc_tos(),
570 fd_array_key_fsp(fsp));
572 SMB_ASSERT(rec != NULL);
574 value = dbwrap_record_get_value(rec);
576 new_data = talloc_array(rec, uint8_t,
577 value.dsize + sizeof(fsp->fh->fd));
579 SMB_ASSERT(new_data != NULL);
581 memcpy(new_data, value.dptr, value.dsize);
582 memcpy(new_data + value.dsize,
583 &fsp->fh->fd, sizeof(fsp->fh->fd));
585 status = dbwrap_record_store(
586 rec, make_tdb_data(new_data,
587 value.dsize + sizeof(fsp->fh->fd)), 0);
589 SMB_ASSERT(NT_STATUS_IS_OK(status));
593 DEBUG(10,("add_fd_to_close_entry: added fd %d file %s\n",
594 fsp->fh->fd, fsp_str_dbg(fsp)));
597 /****************************************************************************
598 Remove all fd entries for a specific dev/inode pair from the tdb.
599 ****************************************************************************/
601 static void delete_close_entries(files_struct *fsp)
603 struct db_record *rec;
605 rec = dbwrap_fetch_locked(
606 posix_pending_close_db, talloc_tos(),
607 fd_array_key_fsp(fsp));
609 SMB_ASSERT(rec != NULL);
610 dbwrap_record_delete(rec);
614 /****************************************************************************
615 Get the array of POSIX pending close records for an open fsp. Returns number
617 ****************************************************************************/
619 static size_t get_posix_pending_close_entries(TALLOC_CTX *mem_ctx,
620 files_struct *fsp, int **entries)
625 status = dbwrap_fetch(
626 posix_pending_close_db, mem_ctx, fd_array_key_fsp(fsp),
629 if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
634 SMB_ASSERT(NT_STATUS_IS_OK(status));
636 if (dbuf.dsize == 0) {
641 *entries = (int *)dbuf.dptr;
642 return (size_t)(dbuf.dsize / sizeof(int));
645 /****************************************************************************
646 Deal with pending closes needed by POSIX locking support.
647 Note that posix_locking_close_file() is expected to have been called
648 to delete all locks on this fsp before this function is called.
649 ****************************************************************************/
651 int fd_close_posix(struct files_struct *fsp)
655 int *fd_array = NULL;
658 if (!lp_locking(fsp->conn->params) ||
659 !lp_posix_locking(fsp->conn->params))
662 * No locking or POSIX to worry about or we want POSIX semantics
663 * which will lose all locks on all fd's open on this dev/inode,
666 return close(fsp->fh->fd);
669 if (get_windows_lock_ref_count(fsp)) {
672 * There are outstanding locks on this dev/inode pair on
673 * other fds. Add our fd to the pending close tdb and set
677 add_fd_to_close_entry(fsp);
682 * No outstanding locks. Get the pending close fd's
683 * from the tdb and close them all.
686 count = get_posix_pending_close_entries(talloc_tos(), fsp, &fd_array);
689 DEBUG(10,("fd_close_posix: doing close on %u fd's.\n",
690 (unsigned int)count));
692 for(i = 0; i < count; i++) {
693 if (close(fd_array[i]) == -1) {
699 * Delete all fd's stored in the tdb
700 * for this dev/inode pair.
703 delete_close_entries(fsp);
706 TALLOC_FREE(fd_array);
708 /* Don't need a lock ref count on this dev/ino anymore. */
709 delete_windows_lock_ref_count(fsp);
712 * Finally close the fd associated with this fsp.
715 ret = close(fsp->fh->fd);
717 if (ret == 0 && saved_errno != 0) {
725 /****************************************************************************
726 Next - the functions that deal with the mapping CIFS Windows locks onto
727 the underlying system POSIX locks.
728 ****************************************************************************/
731 * Structure used when splitting a lock range
732 * into a POSIX lock range. Doubly linked list.
736 struct lock_list *next;
737 struct lock_list *prev;
742 /****************************************************************************
743 Create a list of lock ranges that don't overlap a given range. Used in calculating
744 POSIX locks and unlocks. This is a difficult function that requires ASCII art to
746 ****************************************************************************/
748 static struct lock_list *posix_lock_list(TALLOC_CTX *ctx,
749 struct lock_list *lhead,
750 const struct lock_context *lock_ctx, /* Lock context lhead belongs to. */
752 const struct lock_struct *plocks,
758 * Check the current lock list on this dev/inode pair.
759 * Quit if the list is deleted.
762 DEBUG(10,("posix_lock_list: curr: start=%.0f,size=%.0f\n",
763 (double)lhead->start, (double)lhead->size ));
765 for (i=0; i<num_locks && lhead; i++) {
766 const struct lock_struct *lock = &plocks[i];
767 struct lock_list *l_curr;
769 /* Ignore all but read/write locks. */
770 if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
774 /* Ignore locks not owned by this process. */
775 if (!procid_equal(&lock->context.pid, &lock_ctx->pid)) {
780 * Walk the lock list, checking for overlaps. Note that
781 * the lock list can expand within this loop if the current
782 * range being examined needs to be split.
785 for (l_curr = lhead; l_curr;) {
787 DEBUG(10,("posix_lock_list: lock: fnum=%d: start=%.0f,size=%.0f:type=%s", lock->fnum,
788 (double)lock->start, (double)lock->size, posix_lock_type_name(lock->lock_type) ));
790 if ( (l_curr->start >= (lock->start + lock->size)) ||
791 (lock->start >= (l_curr->start + l_curr->size))) {
793 /* No overlap with existing lock - leave this range alone. */
794 /*********************************************
805 **********************************************/
807 DEBUG(10,(" no overlap case.\n" ));
809 l_curr = l_curr->next;
811 } else if ( (l_curr->start >= lock->start) &&
812 (l_curr->start + l_curr->size <= lock->start + lock->size) ) {
815 * This range is completely overlapped by this existing lock range
816 * and thus should have no effect. Delete it from the list.
818 /*********************************************
822 +---------------------------+
824 +---------------------------+
825 **********************************************/
826 /* Save the next pointer */
827 struct lock_list *ul_next = l_curr->next;
829 DEBUG(10,(" delete case.\n" ));
831 DLIST_REMOVE(lhead, l_curr);
833 break; /* No more list... */
838 } else if ( (l_curr->start >= lock->start) &&
839 (l_curr->start < lock->start + lock->size) &&
840 (l_curr->start + l_curr->size > lock->start + lock->size) ) {
843 * This range overlaps the existing lock range at the high end.
844 * Truncate by moving start to existing range end and reducing size.
846 /*********************************************
857 **********************************************/
859 l_curr->size = (l_curr->start + l_curr->size) - (lock->start + lock->size);
860 l_curr->start = lock->start + lock->size;
862 DEBUG(10,(" truncate high case: start=%.0f,size=%.0f\n",
863 (double)l_curr->start, (double)l_curr->size ));
865 l_curr = l_curr->next;
867 } else if ( (l_curr->start < lock->start) &&
868 (l_curr->start + l_curr->size > lock->start) &&
869 (l_curr->start + l_curr->size <= lock->start + lock->size) ) {
872 * This range overlaps the existing lock range at the low end.
873 * Truncate by reducing size.
875 /*********************************************
886 **********************************************/
888 l_curr->size = lock->start - l_curr->start;
890 DEBUG(10,(" truncate low case: start=%.0f,size=%.0f\n",
891 (double)l_curr->start, (double)l_curr->size ));
893 l_curr = l_curr->next;
895 } else if ( (l_curr->start < lock->start) &&
896 (l_curr->start + l_curr->size > lock->start + lock->size) ) {
898 * Worst case scenario. Range completely overlaps an existing
899 * lock range. Split the request into two, push the new (upper) request
900 * into the dlink list, and continue with the entry after l_new (as we
901 * know that l_new will not overlap with this lock).
903 /*********************************************
904 +---------------------------+
906 +---------------------------+
911 +-------+ +---------+
913 +-------+ +---------+
914 **********************************************/
915 struct lock_list *l_new = talloc(ctx, struct lock_list);
918 DEBUG(0,("posix_lock_list: talloc fail.\n"));
919 return NULL; /* The talloc_destroy takes care of cleanup. */
923 l_new->start = lock->start + lock->size;
924 l_new->size = l_curr->start + l_curr->size - l_new->start;
926 /* Truncate the l_curr. */
927 l_curr->size = lock->start - l_curr->start;
929 DEBUG(10,(" split case: curr: start=%.0f,size=%.0f \
930 new: start=%.0f,size=%.0f\n", (double)l_curr->start, (double)l_curr->size,
931 (double)l_new->start, (double)l_new->size ));
934 * Add into the dlink list after the l_curr point - NOT at lhead.
936 DLIST_ADD_AFTER(lhead, l_new, l_curr);
938 /* And move after the link we added. */
939 l_curr = l_new->next;
944 * This logic case should never happen. Ensure this is the
945 * case by forcing an abort.... Remove in production.
949 if (asprintf(&msg, "logic flaw in cases: l_curr: start = %.0f, size = %.0f : \
950 lock: start = %.0f, size = %.0f", (double)l_curr->start, (double)l_curr->size, (double)lock->start, (double)lock->size ) != -1) {
953 smb_panic("posix_lock_list");
956 } /* end for ( l_curr = lhead; l_curr;) */
957 } /* end for (i=0; i<num_locks && ul_head; i++) */
962 /****************************************************************************
963 POSIX function to acquire a lock. Returns True if the
964 lock could be granted, False if not.
965 ****************************************************************************/
967 bool set_posix_lock_windows_flavour(files_struct *fsp,
970 enum brl_type lock_type,
971 const struct lock_context *lock_ctx,
972 const struct lock_struct *plocks,
978 int posix_lock_type = map_posix_lock_type(fsp,lock_type);
981 TALLOC_CTX *l_ctx = NULL;
982 struct lock_list *llist = NULL;
983 struct lock_list *ll = NULL;
985 DEBUG(5,("set_posix_lock_windows_flavour: File %s, offset = %.0f, "
986 "count = %.0f, type = %s\n", fsp_str_dbg(fsp),
987 (double)u_offset, (double)u_count,
988 posix_lock_type_name(lock_type)));
991 * If the requested lock won't fit in the POSIX range, we will
992 * pretend it was successful.
995 if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
996 increment_windows_lock_ref_count(fsp);
1001 * Windows is very strange. It allows read locks to be overlayed
1002 * (even over a write lock), but leaves the write lock in force until the first
1003 * unlock. It also reference counts the locks. This means the following sequence :
1006 * ------------------------------------------------------------------------
1007 * WRITE LOCK : start = 2, len = 10
1008 * READ LOCK: start =0, len = 10 - FAIL
1009 * READ LOCK : start = 0, len = 14
1010 * READ LOCK: start =0, len = 10 - FAIL
1011 * UNLOCK : start = 2, len = 10
1012 * READ LOCK: start =0, len = 10 - OK
1014 * Under POSIX, the same sequence in steps 1 and 2 would not be reference counted, but
1015 * would leave a single read lock over the 0-14 region.
1018 if ((l_ctx = talloc_init("set_posix_lock")) == NULL) {
1019 DEBUG(0,("set_posix_lock_windows_flavour: unable to init talloc context.\n"));
1023 if ((ll = talloc(l_ctx, struct lock_list)) == NULL) {
1024 DEBUG(0,("set_posix_lock_windows_flavour: unable to talloc unlock list.\n"));
1025 talloc_destroy(l_ctx);
1030 * Create the initial list entry containing the
1031 * lock we want to add.
1038 DLIST_ADD(llist, ll);
1041 * The following call calculates if there are any
1042 * overlapping locks held by this process on
1043 * fd's open on the same file and splits this list
1044 * into a list of lock ranges that do not overlap with existing
1048 llist = posix_lock_list(l_ctx,
1050 lock_ctx, /* Lock context llist belongs to. */
1056 * Add the POSIX locks on the list of ranges returned.
1057 * As the lock is supposed to be added atomically, we need to
1058 * back out all the locks if any one of these calls fail.
1061 for (lock_count = 0, ll = llist; ll; ll = ll->next, lock_count++) {
1065 DEBUG(5,("set_posix_lock_windows_flavour: Real lock: Type = %s: offset = %.0f, count = %.0f\n",
1066 posix_lock_type_name(posix_lock_type), (double)offset, (double)count ));
1068 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,posix_lock_type)) {
1070 DEBUG(5,("set_posix_lock_windows_flavour: Lock fail !: Type = %s: offset = %.0f, count = %.0f. Errno = %s\n",
1071 posix_lock_type_name(posix_lock_type), (double)offset, (double)count, strerror(errno) ));
1080 * Back out all the POSIX locks we have on fail.
1083 for (ll = llist; lock_count; ll = ll->next, lock_count--) {
1087 DEBUG(5,("set_posix_lock_windows_flavour: Backing out locks: Type = %s: offset = %.0f, count = %.0f\n",
1088 posix_lock_type_name(posix_lock_type), (double)offset, (double)count ));
1090 posix_fcntl_lock(fsp,F_SETLK,offset,count,F_UNLCK);
1093 /* Remember the number of Windows locks we have on this dev/ino pair. */
1094 increment_windows_lock_ref_count(fsp);
1097 talloc_destroy(l_ctx);
1101 /****************************************************************************
1102 POSIX function to release a lock. Returns True if the
1103 lock could be released, False if not.
1104 ****************************************************************************/
1106 bool release_posix_lock_windows_flavour(files_struct *fsp,
1109 enum brl_type deleted_lock_type,
1110 const struct lock_context *lock_ctx,
1111 const struct lock_struct *plocks,
1117 TALLOC_CTX *ul_ctx = NULL;
1118 struct lock_list *ulist = NULL;
1119 struct lock_list *ul = NULL;
1121 DEBUG(5,("release_posix_lock_windows_flavour: File %s, offset = %.0f, "
1122 "count = %.0f\n", fsp_str_dbg(fsp),
1123 (double)u_offset, (double)u_count));
1125 /* Remember the number of Windows locks we have on this dev/ino pair. */
1126 decrement_windows_lock_ref_count(fsp);
1129 * If the requested lock won't fit in the POSIX range, we will
1130 * pretend it was successful.
1133 if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
1137 if ((ul_ctx = talloc_init("release_posix_lock")) == NULL) {
1138 DEBUG(0,("release_posix_lock_windows_flavour: unable to init talloc context.\n"));
1142 if ((ul = talloc(ul_ctx, struct lock_list)) == NULL) {
1143 DEBUG(0,("release_posix_lock_windows_flavour: unable to talloc unlock list.\n"));
1144 talloc_destroy(ul_ctx);
1149 * Create the initial list entry containing the
1150 * lock we want to remove.
1157 DLIST_ADD(ulist, ul);
1160 * The following call calculates if there are any
1161 * overlapping locks held by this process on
1162 * fd's open on the same file and creates a
1163 * list of unlock ranges that will allow
1164 * POSIX lock ranges to remain on the file whilst the
1165 * unlocks are performed.
1168 ulist = posix_lock_list(ul_ctx,
1170 lock_ctx, /* Lock context ulist belongs to. */
1176 * If there were any overlapped entries (list is > 1 or size or start have changed),
1177 * and the lock_type we just deleted from
1178 * the upper layer tdb was a write lock, then before doing the unlock we need to downgrade
1179 * the POSIX lock to a read lock. This allows any overlapping read locks
1180 * to be atomically maintained.
1183 if (deleted_lock_type == WRITE_LOCK &&
1184 (!ulist || ulist->next != NULL || ulist->start != offset || ulist->size != count)) {
1186 DEBUG(5,("release_posix_lock_windows_flavour: downgrading lock to READ: offset = %.0f, count = %.0f\n",
1187 (double)offset, (double)count ));
1189 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,F_RDLCK)) {
1190 DEBUG(0,("release_posix_lock_windows_flavour: downgrade of lock failed with error %s !\n", strerror(errno) ));
1191 talloc_destroy(ul_ctx);
1197 * Release the POSIX locks on the list of ranges returned.
1200 for(; ulist; ulist = ulist->next) {
1201 offset = ulist->start;
1202 count = ulist->size;
1204 DEBUG(5,("release_posix_lock_windows_flavour: Real unlock: offset = %.0f, count = %.0f\n",
1205 (double)offset, (double)count ));
1207 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,F_UNLCK)) {
1212 talloc_destroy(ul_ctx);
1216 /****************************************************************************
1217 Next - the functions that deal with mapping CIFS POSIX locks onto
1218 the underlying system POSIX locks.
1219 ****************************************************************************/
1221 /****************************************************************************
1222 POSIX function to acquire a lock. Returns True if the
1223 lock could be granted, False if not.
1224 As POSIX locks don't stack or conflict (they just overwrite)
1225 we can map the requested lock directly onto a system one. We
1226 know it doesn't conflict with locks on other contexts as the
1227 upper layer would have refused it.
1228 ****************************************************************************/
1230 bool set_posix_lock_posix_flavour(files_struct *fsp,
1233 enum brl_type lock_type,
1238 int posix_lock_type = map_posix_lock_type(fsp,lock_type);
1240 DEBUG(5,("set_posix_lock_posix_flavour: File %s, offset = %.0f, count "
1241 "= %.0f, type = %s\n", fsp_str_dbg(fsp),
1242 (double)u_offset, (double)u_count,
1243 posix_lock_type_name(lock_type)));
1246 * If the requested lock won't fit in the POSIX range, we will
1247 * pretend it was successful.
1250 if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
1254 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,posix_lock_type)) {
1256 DEBUG(5,("set_posix_lock_posix_flavour: Lock fail !: Type = %s: offset = %.0f, count = %.0f. Errno = %s\n",
1257 posix_lock_type_name(posix_lock_type), (double)offset, (double)count, strerror(errno) ));
1263 /****************************************************************************
1264 POSIX function to release a lock. Returns True if the
1265 lock could be released, False if not.
1266 We are given a complete lock state from the upper layer which is what the lock
1267 state should be after the unlock has already been done, so what
1268 we do is punch out holes in the unlock range where locks owned by this process
1269 have a different lock context.
1270 ****************************************************************************/
1272 bool release_posix_lock_posix_flavour(files_struct *fsp,
1275 const struct lock_context *lock_ctx,
1276 const struct lock_struct *plocks,
1282 TALLOC_CTX *ul_ctx = NULL;
1283 struct lock_list *ulist = NULL;
1284 struct lock_list *ul = NULL;
1286 DEBUG(5,("release_posix_lock_posix_flavour: File %s, offset = %.0f, "
1287 "count = %.0f\n", fsp_str_dbg(fsp),
1288 (double)u_offset, (double)u_count));
1291 * If the requested lock won't fit in the POSIX range, we will
1292 * pretend it was successful.
1295 if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
1299 if ((ul_ctx = talloc_init("release_posix_lock")) == NULL) {
1300 DEBUG(0,("release_posix_lock_windows_flavour: unable to init talloc context.\n"));
1304 if ((ul = talloc(ul_ctx, struct lock_list)) == NULL) {
1305 DEBUG(0,("release_posix_lock_windows_flavour: unable to talloc unlock list.\n"));
1306 talloc_destroy(ul_ctx);
1311 * Create the initial list entry containing the
1312 * lock we want to remove.
1319 DLIST_ADD(ulist, ul);
1322 * Walk the given array creating a linked list
1323 * of unlock requests.
1326 ulist = posix_lock_list(ul_ctx,
1328 lock_ctx, /* Lock context ulist belongs to. */
1334 * Release the POSIX locks on the list of ranges returned.
1337 for(; ulist; ulist = ulist->next) {
1338 offset = ulist->start;
1339 count = ulist->size;
1341 DEBUG(5,("release_posix_lock_posix_flavour: Real unlock: offset = %.0f, count = %.0f\n",
1342 (double)offset, (double)count ));
1344 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,F_UNLCK)) {
1349 talloc_destroy(ul_ctx);