X-Git-Url: http://git.samba.org/?a=blobdiff_plain;f=source3%2Fmodules%2Fvfs_default.c;h=7ce3775e54f0aa7b841a4f6e9a7cd89e398f2b6f;hb=e18610a197aab80a32cae8c1e09b96496679bbad;hp=e2efdabf47ce98d62ebfc67e4bee84dd42fe4c07;hpb=74590c6795f8dac5d62d78273730910819395b89;p=samba.git diff --git a/source3/modules/vfs_default.c b/source3/modules/vfs_default.c index e2efdabf47c..7ce3775e54f 100644 --- a/source3/modules/vfs_default.c +++ b/source3/modules/vfs_default.c @@ -498,7 +498,6 @@ static int vfswrap_mkdir(vfs_handle_struct *handle, mode_t mode) { int result; - bool has_dacl = False; const char *path = smb_fname->base_name; char *parent = NULL; @@ -506,7 +505,7 @@ static int vfswrap_mkdir(vfs_handle_struct *handle, if (lp_inherit_acls(SNUM(handle->conn)) && parent_dirname(talloc_tos(), path, &parent, NULL) - && (has_dacl = directory_has_default_acl(handle->conn, parent))) { + && directory_has_default_acl(handle->conn, parent)) { mode = (0777 & lp_directory_mask(SNUM(handle->conn))); } @@ -514,21 +513,6 @@ static int vfswrap_mkdir(vfs_handle_struct *handle, result = mkdir(path, mode); - if (result == 0 && !has_dacl) { - /* - * We need to do this as the default behavior of POSIX ACLs - * is to set the mask to be the requested group permission - * bits, not the group permission bits to be the requested - * group permission bits. This is not what we want, as it will - * mess up any inherited ACL bits that were set. JRA. - */ - int saved_errno = errno; /* We may get ENOSYS */ - if ((SMB_VFS_CHMOD_ACL(handle->conn, smb_fname, mode) == -1) && - (errno == ENOSYS)) { - errno = saved_errno; - } - } - END_PROFILE(syscall_mkdir); return result; } @@ -614,16 +598,6 @@ static int vfswrap_close(vfs_handle_struct *handle, files_struct *fsp) return result; } -static ssize_t vfswrap_read(vfs_handle_struct *handle, files_struct *fsp, void *data, size_t n) -{ - ssize_t result; - - START_PROFILE_BYTES(syscall_read, n); - result = sys_read(fsp->fh->fd, data, n); - END_PROFILE_BYTES(syscall_read); - return result; -} - static ssize_t vfswrap_pread(vfs_handle_struct *handle, files_struct *fsp, void *data, size_t n, off_t offset) { @@ -636,48 +610,18 @@ static ssize_t vfswrap_pread(vfs_handle_struct *handle, files_struct *fsp, void if (result == -1 && errno == ESPIPE) { /* Maintain the fiction that pipes can be seeked (sought?) on. */ - result = SMB_VFS_READ(fsp, data, n); + result = sys_read(fsp->fh->fd, data, n); fsp->fh->pos = 0; } #else /* HAVE_PREAD */ - off_t curr; - int lerrno; - - curr = SMB_VFS_LSEEK(fsp, 0, SEEK_CUR); - if (curr == -1 && errno == ESPIPE) { - /* Maintain the fiction that pipes can be seeked (sought?) on. 
*/ - result = SMB_VFS_READ(fsp, data, n); - fsp->fh->pos = 0; - return result; - } - - if (SMB_VFS_LSEEK(fsp, offset, SEEK_SET) == -1) { - return -1; - } - - errno = 0; - result = SMB_VFS_READ(fsp, data, n); - lerrno = errno; - - SMB_VFS_LSEEK(fsp, curr, SEEK_SET); - errno = lerrno; - + errno = ENOSYS; + result = -1; #endif /* HAVE_PREAD */ return result; } -static ssize_t vfswrap_write(vfs_handle_struct *handle, files_struct *fsp, const void *data, size_t n) -{ - ssize_t result; - - START_PROFILE_BYTES(syscall_write, n); - result = sys_write(fsp->fh->fd, data, n); - END_PROFILE_BYTES(syscall_write); - return result; -} - static ssize_t vfswrap_pwrite(vfs_handle_struct *handle, files_struct *fsp, const void *data, size_t n, off_t offset) { @@ -690,28 +634,12 @@ static ssize_t vfswrap_pwrite(vfs_handle_struct *handle, files_struct *fsp, cons if (result == -1 && errno == ESPIPE) { /* Maintain the fiction that pipes can be sought on. */ - result = SMB_VFS_WRITE(fsp, data, n); + result = sys_write(fsp->fh->fd, data, n); } #else /* HAVE_PWRITE */ - off_t curr; - int lerrno; - - curr = SMB_VFS_LSEEK(fsp, 0, SEEK_CUR); - if (curr == -1) { - return -1; - } - - if (SMB_VFS_LSEEK(fsp, offset, SEEK_SET) == -1) { - return -1; - } - - result = SMB_VFS_WRITE(fsp, data, n); - lerrno = errno; - - SMB_VFS_LSEEK(fsp, curr, SEEK_SET); - errno = lerrno; - + errno = ENOSYS; + result = -1; #endif /* HAVE_PWRITE */ return result; @@ -814,8 +742,18 @@ static void vfs_pread_done(struct tevent_req *subreq) TALLOC_FREE(subreq); SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes); talloc_set_destructor(state, NULL); - if (tevent_req_error(req, ret)) { - return; + if (ret != 0) { + if (ret != EAGAIN) { + tevent_req_error(req, ret); + return; + } + /* + * If we get EAGAIN from pthreadpool_tevent_job_recv() this + * means the lower level pthreadpool failed to create a new + * thread. Fallback to sync processing in that case to allow + * some progress for the client. + */ + vfs_pread_do(state); } tevent_req_done(req); @@ -932,8 +870,18 @@ static void vfs_pwrite_done(struct tevent_req *subreq) TALLOC_FREE(subreq); SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes); talloc_set_destructor(state, NULL); - if (tevent_req_error(req, ret)) { - return; + if (ret != 0) { + if (ret != EAGAIN) { + tevent_req_error(req, ret); + return; + } + /* + * If we get EAGAIN from pthreadpool_tevent_job_recv() this + * means the lower level pthreadpool failed to create a new + * thread. Fallback to sync processing in that case to allow + * some progress for the client. 
+ */ + vfs_pwrite_do(state); } tevent_req_done(req); @@ -958,7 +906,7 @@ struct vfswrap_fsync_state { int fd; struct vfs_aio_state vfs_aio_state; - SMBPROFILE_BASIC_ASYNC_STATE(profile_basic); + SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes); }; static void vfs_fsync_do(void *private_data); @@ -981,8 +929,9 @@ static struct tevent_req *vfswrap_fsync_send(struct vfs_handle_struct *handle, state->ret = -1; state->fd = fsp->fh->fd; - SMBPROFILE_BASIC_ASYNC_START(syscall_asys_fsync, profile_p, - state->profile_basic); + SMBPROFILE_BYTES_ASYNC_START(syscall_asys_fsync, profile_p, + state->profile_bytes, 0); + SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes); subreq = pthreadpool_tevent_job_send( state, ev, handle->conn->sconn->pool, vfs_fsync_do, state); @@ -1003,6 +952,8 @@ static void vfs_fsync_do(void *private_data) struct timespec start_time; struct timespec end_time; + SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes); + PROFILE_TIMESTAMP(&start_time); do { @@ -1016,6 +967,8 @@ static void vfs_fsync_do(void *private_data) PROFILE_TIMESTAMP(&end_time); state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time); + + SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes); } static int vfs_fsync_state_destructor(struct vfswrap_fsync_state *state) @@ -1033,10 +986,20 @@ static void vfs_fsync_done(struct tevent_req *subreq) ret = pthreadpool_tevent_job_recv(subreq); TALLOC_FREE(subreq); - SMBPROFILE_BASIC_ASYNC_END(state->profile_basic); + SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes); talloc_set_destructor(state, NULL); - if (tevent_req_error(req, ret)) { - return; + if (ret != 0) { + if (ret != EAGAIN) { + tevent_req_error(req, ret); + return; + } + /* + * If we get EAGAIN from pthreadpool_tevent_job_recv() this + * means the lower level pthreadpool failed to create a new + * thread. Fallback to sync processing in that case to allow + * some progress for the client. + */ + vfs_fsync_do(state); } tevent_req_done(req); @@ -1062,10 +1025,7 @@ static off_t vfswrap_lseek(vfs_handle_struct *handle, files_struct *fsp, off_t o START_PROFILE(syscall_lseek); - /* Cope with 'stat' file opens. */ - if (fsp->fh->fd != -1) - result = lseek(fsp->fh->fd, offset, whence); - + result = lseek(fsp->fh->fd, offset, whence); /* * We want to maintain the fiction that we can seek * on a fifo for file system purposes. 
This allows @@ -1127,20 +1087,6 @@ static int vfswrap_rename(vfs_handle_struct *handle, return result; } -static int vfswrap_fsync(vfs_handle_struct *handle, files_struct *fsp) -{ -#ifdef HAVE_FSYNC - int result; - - START_PROFILE(syscall_fsync); - result = fsync(fsp->fh->fd); - END_PROFILE(syscall_fsync); - return result; -#else - return 0; -#endif -} - static int vfswrap_stat(vfs_handle_struct *handle, struct smb_filename *smb_fname) { @@ -1409,7 +1355,9 @@ static NTSTATUS vfswrap_fsctl(struct vfs_handle_struct *handle, * * but I have to check that --metze */ + struct sid_parse_ret ret; struct dom_sid sid; + struct dom_sid_buf buf; uid_t uid; size_t sid_len; @@ -1426,14 +1374,16 @@ static NTSTATUS vfswrap_fsctl(struct vfs_handle_struct *handle, /* unknown 4 bytes: this is not the length of the sid :-( */ /*unknown = IVAL(pdata,0);*/ - if (!sid_parse(_in_data + 4, sid_len, &sid)) { + ret = sid_parse(_in_data + 4, sid_len, &sid); + if (ret.len == -1) { return NT_STATUS_INVALID_PARAMETER; } - DEBUGADD(10, ("for SID: %s\n", sid_string_dbg(&sid))); + DEBUGADD(10, ("for SID: %s\n", + dom_sid_str_buf(&sid, &buf))); if (!sid_to_uid(&sid, &uid)) { DEBUG(0,("sid_to_uid: failed, sid[%s] sid_len[%lu]\n", - sid_string_dbg(&sid), + dom_sid_str_buf(&sid, &buf), (unsigned long)sid_len)); uid = (-1); } @@ -1567,6 +1517,142 @@ static NTSTATUS vfswrap_get_dos_attributes(struct vfs_handle_struct *handle, return get_ea_dos_attribute(handle->conn, smb_fname, dosmode); } +struct vfswrap_get_dos_attributes_state { + struct vfs_aio_state aio_state; + connection_struct *conn; + TALLOC_CTX *mem_ctx; + struct tevent_context *ev; + files_struct *dir_fsp; + struct smb_filename *smb_fname; + uint32_t dosmode; + bool as_root; +}; + +static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq); + +static struct tevent_req *vfswrap_get_dos_attributes_send( + TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct vfs_handle_struct *handle, + files_struct *dir_fsp, + struct smb_filename *smb_fname) +{ + struct tevent_req *req = NULL; + struct tevent_req *subreq = NULL; + struct vfswrap_get_dos_attributes_state *state = NULL; + + req = tevent_req_create(mem_ctx, &state, + struct vfswrap_get_dos_attributes_state); + if (req == NULL) { + return NULL; + } + + *state = (struct vfswrap_get_dos_attributes_state) { + .conn = dir_fsp->conn, + .mem_ctx = mem_ctx, + .ev = ev, + .dir_fsp = dir_fsp, + .smb_fname = smb_fname, + }; + + subreq = SMB_VFS_GETXATTRAT_SEND(state, + ev, + dir_fsp, + smb_fname, + SAMBA_XATTR_DOS_ATTRIB, + sizeof(fstring)); + if (tevent_req_nomem(subreq, req)) { + return tevent_req_post(req, ev); + } + tevent_req_set_callback(subreq, + vfswrap_get_dos_attributes_getxattr_done, + req); + + return req; +} + +static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq) +{ + struct tevent_req *req = + tevent_req_callback_data(subreq, + struct tevent_req); + struct vfswrap_get_dos_attributes_state *state = + tevent_req_data(req, + struct vfswrap_get_dos_attributes_state); + ssize_t xattr_size; + DATA_BLOB blob = {0}; + NTSTATUS status; + + xattr_size = SMB_VFS_GETXATTRAT_RECV(subreq, + &state->aio_state, + state, + &blob.data); + TALLOC_FREE(subreq); + if (xattr_size == -1) { + status = map_nt_error_from_unix(state->aio_state.error); + + if (state->as_root) { + tevent_req_nterror(req, status); + return; + } + if (!NT_STATUS_EQUAL(status, NT_STATUS_ACCESS_DENIED)) { + tevent_req_nterror(req, status); + return; + } + + state->as_root = true; + + become_root(); + subreq = 
SMB_VFS_GETXATTRAT_SEND(state, + state->ev, + state->dir_fsp, + state->smb_fname, + SAMBA_XATTR_DOS_ATTRIB, + sizeof(fstring)); + unbecome_root(); + if (tevent_req_nomem(subreq, req)) { + return; + } + tevent_req_set_callback(subreq, + vfswrap_get_dos_attributes_getxattr_done, + req); + return; + } + + blob.length = xattr_size; + + status = parse_dos_attribute_blob(state->smb_fname, + blob, + &state->dosmode); + if (!NT_STATUS_IS_OK(status)) { + tevent_req_nterror(req, status); + return; + } + + tevent_req_done(req); + return; +} + +static NTSTATUS vfswrap_get_dos_attributes_recv(struct tevent_req *req, + struct vfs_aio_state *aio_state, + uint32_t *dosmode) +{ + struct vfswrap_get_dos_attributes_state *state = + tevent_req_data(req, + struct vfswrap_get_dos_attributes_state); + NTSTATUS status; + + if (tevent_req_is_nterror(req, &status)) { + tevent_req_received(req); + return status; + } + + *aio_state = state->aio_state; + *dosmode = state->dosmode; + tevent_req_received(req); + return NT_STATUS_OK; +} + static NTSTATUS vfswrap_fget_dos_attributes(struct vfs_handle_struct *handle, struct files_struct *fsp, uint32_t *dosmode) @@ -1670,13 +1756,14 @@ static NTSTATUS vfswrap_offload_read_recv(struct tevent_req *req, } struct vfswrap_offload_write_state { - struct tevent_context *ev; uint8_t *buf; bool read_lck_locked; bool write_lck_locked; DATA_BLOB *token; + struct tevent_context *src_ev; struct files_struct *src_fsp; off_t src_off; + struct tevent_context *dst_ev; struct files_struct *dst_fsp; off_t dst_off; off_t to_copy; @@ -1684,6 +1771,22 @@ struct vfswrap_offload_write_state { size_t next_io_size; }; +static void vfswrap_offload_write_cleanup(struct tevent_req *req, + enum tevent_req_state req_state) +{ + struct vfswrap_offload_write_state *state = tevent_req_data( + req, struct vfswrap_offload_write_state); + bool ok; + + if (state->dst_fsp == NULL) { + return; + } + + ok = change_to_user_by_fsp(state->dst_fsp); + SMB_ASSERT(ok); + state->dst_fsp = NULL; +} + static NTSTATUS vfswrap_offload_write_loop(struct tevent_req *req); static struct tevent_req *vfswrap_offload_write_send( @@ -1702,6 +1805,7 @@ static struct tevent_req *vfswrap_offload_write_send( size_t num = MIN(to_copy, COPYCHUNK_MAX_TOTAL_LEN); files_struct *src_fsp = NULL; NTSTATUS status; + bool ok; req = tevent_req_create(mem_ctx, &state, struct vfswrap_offload_write_state); @@ -1710,15 +1814,17 @@ static struct tevent_req *vfswrap_offload_write_send( } *state = (struct vfswrap_offload_write_state) { - .ev = ev, .token = token, .src_off = transfer_offset, + .dst_ev = ev, .dst_fsp = dest_fsp, .dst_off = dest_off, .to_copy = to_copy, .remaining = to_copy, }; + tevent_req_set_cleanup_fn(req, vfswrap_offload_write_cleanup); + switch (fsctl) { case FSCTL_SRV_COPYCHUNK: case FSCTL_SRV_COPYCHUNK_WRITE: @@ -1752,7 +1858,6 @@ static struct tevent_req *vfswrap_offload_write_send( if (tevent_req_nterror(req, status)) { return tevent_req_post(req, ev); } - state->src_fsp = src_fsp; DBG_DEBUG("server side copy chunk of length %" PRIu64 "\n", to_copy); @@ -1762,6 +1867,15 @@ static struct tevent_req *vfswrap_offload_write_send( return tevent_req_post(req, ev); } + ok = change_to_user_by_fsp(src_fsp); + if (!ok) { + tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED); + return tevent_req_post(req, ev); + } + + state->src_ev = src_fsp->conn->sconn->ev_ctx; + state->src_fsp = src_fsp; + state->buf = talloc_array(state, uint8_t, num); if (tevent_req_nomem(state->buf, req)) { return tevent_req_post(req, ev); @@ -1786,16 +1900,6 @@ static 
struct tevent_req *vfswrap_offload_write_send( return tevent_req_post(req, ev); } - if (src_fsp->op == NULL) { - tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR); - return tevent_req_post(req, ev); - } - - if (dest_fsp->op == NULL) { - tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR); - return tevent_req_post(req, ev); - } - status = vfswrap_offload_write_loop(req); if (!NT_STATUS_IS_OK(status)) { tevent_req_nterror(req, status); @@ -1815,6 +1919,10 @@ static NTSTATUS vfswrap_offload_write_loop(struct tevent_req *req) struct lock_struct read_lck; bool ok; + /* + * This is called under the context of state->src_fsp. + */ + state->next_io_size = MIN(state->remaining, talloc_array_length(state->buf)); init_strict_lock_struct(state->src_fsp, @@ -1832,7 +1940,7 @@ static NTSTATUS vfswrap_offload_write_loop(struct tevent_req *req) } subreq = SMB_VFS_PREAD_SEND(state, - state->src_fsp->conn->sconn->ev_ctx, + state->src_ev, state->src_fsp, state->buf, state->next_io_size, @@ -1874,6 +1982,12 @@ static void vfswrap_offload_write_read_done(struct tevent_req *subreq) state->src_off += nread; + ok = change_to_user_by_fsp(state->dst_fsp); + if (!ok) { + tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR); + return; + } + init_strict_lock_struct(state->dst_fsp, state->dst_fsp->op->global->open_persistent_id, state->dst_off, @@ -1890,7 +2004,7 @@ static void vfswrap_offload_write_read_done(struct tevent_req *subreq) } subreq = SMB_VFS_PWRITE_SEND(state, - state->ev, + state->dst_ev, state->dst_fsp, state->buf, state->next_io_size, @@ -1911,6 +2025,7 @@ static void vfswrap_offload_write_write_done(struct tevent_req *subreq) struct vfs_aio_state aio_state; ssize_t nwritten; NTSTATUS status; + bool ok; nwritten = SMB_VFS_PWRITE_RECV(subreq, &aio_state); TALLOC_FREE(subreq); @@ -1938,6 +2053,12 @@ static void vfswrap_offload_write_write_done(struct tevent_req *subreq) return; } + ok = change_to_user_by_fsp(state->src_fsp); + if (!ok) { + tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR); + return; + } + status = vfswrap_offload_write_loop(req); if (!NT_STATUS_IS_OK(status)) { tevent_req_nterror(req, status); @@ -2069,27 +2190,6 @@ static int vfswrap_chmod(vfs_handle_struct *handle, int result; START_PROFILE(syscall_chmod); - - /* - * We need to do this due to the fact that the default POSIX ACL - * chmod modifies the ACL *mask* for the group owner, not the - * group owner bits directly. JRA. - */ - - - { - int saved_errno = errno; /* We might get ENOSYS */ - result = SMB_VFS_CHMOD_ACL(handle->conn, - smb_fname, - mode); - if (result == 0) { - END_PROFILE(syscall_chmod); - return result; - } - /* Error - return the old errno. */ - errno = saved_errno; - } - result = chmod(smb_fname->base_name, mode); END_PROFILE(syscall_chmod); return result; @@ -2100,23 +2200,6 @@ static int vfswrap_fchmod(vfs_handle_struct *handle, files_struct *fsp, mode_t m int result; START_PROFILE(syscall_fchmod); - - /* - * We need to do this due to the fact that the default POSIX ACL - * chmod modifies the ACL *mask* for the group owner, not the - * group owner bits directly. JRA. - */ - - { - int saved_errno = errno; /* We might get ENOSYS */ - if ((result = SMB_VFS_FCHMOD_ACL(fsp, mode)) == 0) { - END_PROFILE(syscall_fchmod); - return result; - } - /* Error - return the old errno. 
*/ - errno = saved_errno; - } - #if defined(HAVE_FCHMOD) result = fchmod(fsp->fh->fd, mode); #else @@ -2469,11 +2552,8 @@ static bool vfswrap_lock(vfs_handle_struct *handle, files_struct *fsp, int op, o START_PROFILE(syscall_fcntl_lock); - if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn), - "smbd", - "force process locks", - false)) { - op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks); + if (fsp->use_ofd_locks) { + op = map_process_lock_to_ofd_lock(op); } result = fcntl_lock(fsp->fh->fd, op, offset, count, type); @@ -2497,11 +2577,8 @@ static bool vfswrap_getlock(vfs_handle_struct *handle, files_struct *fsp, off_t START_PROFILE(syscall_fcntl_getlock); - if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn), - "smbd", - "force process locks", - false)) { - op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks); + if (fsp->use_ofd_locks) { + op = map_process_lock_to_ofd_lock(op); } result = fcntl_getlock(fsp->fh->fd, op, poffset, pcount, ptype, ppid); @@ -2798,38 +2875,6 @@ static NTSTATUS vfswrap_audit_file(struct vfs_handle_struct *handle, return NT_STATUS_OK; /* Nothing to do here ... */ } -static int vfswrap_chmod_acl(vfs_handle_struct *handle, - const struct smb_filename *smb_fname, - mode_t mode) -{ -#ifdef HAVE_NO_ACL - errno = ENOSYS; - return -1; -#else - int result; - - START_PROFILE(chmod_acl); - result = chmod_acl(handle->conn, smb_fname, mode); - END_PROFILE(chmod_acl); - return result; -#endif -} - -static int vfswrap_fchmod_acl(vfs_handle_struct *handle, files_struct *fsp, mode_t mode) -{ -#ifdef HAVE_NO_ACL - errno = ENOSYS; - return -1; -#else - int result; - - START_PROFILE(fchmod_acl); - result = fchmod_acl(fsp, mode); - END_PROFILE(fchmod_acl); - return result; -#endif -} - static SMB_ACL_T vfswrap_sys_acl_get_file(vfs_handle_struct *handle, const struct smb_filename *smb_fname, SMB_ACL_TYPE_T type, @@ -2877,6 +2922,325 @@ static ssize_t vfswrap_getxattr(struct vfs_handle_struct *handle, return getxattr(smb_fname->base_name, name, value, size); } +struct vfswrap_getxattrat_state { + struct tevent_context *ev; + files_struct *dir_fsp; + const struct smb_filename *smb_fname; + struct tevent_req *req; + + /* + * The following variables are talloced off "state" which is protected + * by a destructor and thus are guaranteed to be safe to be used in the + * job function in the worker thread. 
+ */ + char *name; + const char *xattr_name; + uint8_t *xattr_value; + struct security_unix_token *token; + + ssize_t xattr_size; + struct vfs_aio_state vfs_aio_state; + SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes); +}; + +static int vfswrap_getxattrat_state_destructor( + struct vfswrap_getxattrat_state *state) +{ + return -1; +} + +static void vfswrap_getxattrat_do_sync(struct tevent_req *req); +static void vfswrap_getxattrat_do_async(void *private_data); +static void vfswrap_getxattrat_done(struct tevent_req *subreq); + +static struct tevent_req *vfswrap_getxattrat_send( + TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct vfs_handle_struct *handle, + files_struct *dir_fsp, + const struct smb_filename *smb_fname, + const char *xattr_name, + size_t alloc_hint) +{ + struct tevent_req *req = NULL; + struct tevent_req *subreq = NULL; + struct vfswrap_getxattrat_state *state = NULL; + size_t max_threads = 0; + bool have_per_thread_cwd = false; + bool have_per_thread_creds = false; + bool do_async = false; + + req = tevent_req_create(mem_ctx, &state, + struct vfswrap_getxattrat_state); + if (req == NULL) { + return NULL; + } + *state = (struct vfswrap_getxattrat_state) { + .ev = ev, + .dir_fsp = dir_fsp, + .smb_fname = smb_fname, + .req = req, + }; + + max_threads = pthreadpool_tevent_max_threads(dir_fsp->conn->sconn->pool); + if (max_threads >= 1) { + /* + * We need a non sync threadpool! + */ + have_per_thread_cwd = per_thread_cwd_supported(); + } +#ifdef HAVE_LINUX_THREAD_CREDENTIALS + have_per_thread_creds = true; +#endif + if (have_per_thread_cwd && have_per_thread_creds) { + do_async = true; + } + + SMBPROFILE_BYTES_ASYNC_START(syscall_asys_getxattrat, profile_p, + state->profile_bytes, 0); + + if (dir_fsp->fh->fd == -1) { + DBG_ERR("Need a valid directory fd\n"); + tevent_req_error(req, EINVAL); + return tevent_req_post(req, ev); + } + + if (alloc_hint > 0) { + state->xattr_value = talloc_zero_array(state, + uint8_t, + alloc_hint); + if (tevent_req_nomem(state->xattr_value, req)) { + return tevent_req_post(req, ev); + } + } + + if (!do_async) { + vfswrap_getxattrat_do_sync(req); + return tevent_req_post(req, ev); + } + + /* + * Now allocate all parameters from a memory context that won't go away + * no matter what. These paremeters will get used in threads and we + * can't reliably cancel threads, so all buffers passed to the threads + * must not be freed before all referencing threads terminate. + */ + + state->name = talloc_strdup(state, smb_fname->base_name); + if (tevent_req_nomem(state->name, req)) { + return tevent_req_post(req, ev); + } + + state->xattr_name = talloc_strdup(state, xattr_name); + if (tevent_req_nomem(state->xattr_name, req)) { + return tevent_req_post(req, ev); + } + + /* + * This is a hot codepath so at first glance one might think we should + * somehow optimize away the token allocation and do a + * talloc_reference() or similar black magic instead. But due to the + * talloc_stackframe pool per SMB2 request this should be a simple copy + * without a malloc in most cases. 
+ */ + if (geteuid() == sec_initial_uid()) { + state->token = root_unix_token(state); + } else { + state->token = copy_unix_token( + state, + dir_fsp->conn->session_info->unix_token); + } + if (tevent_req_nomem(state->token, req)) { + return tevent_req_post(req, ev); + } + + SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes); + + subreq = pthreadpool_tevent_job_send( + state, + ev, + dir_fsp->conn->sconn->pool, + vfswrap_getxattrat_do_async, + state); + if (tevent_req_nomem(subreq, req)) { + return tevent_req_post(req, ev); + } + tevent_req_set_callback(subreq, vfswrap_getxattrat_done, req); + + talloc_set_destructor(state, vfswrap_getxattrat_state_destructor); + + return req; +} + +static void vfswrap_getxattrat_do_sync(struct tevent_req *req) +{ + struct vfswrap_getxattrat_state *state = talloc_get_type_abort( + req, struct vfswrap_getxattrat_state); + char *path = NULL; + char *tofree = NULL; + char pathbuf[PATH_MAX+1]; + size_t pathlen; + int err; + + pathlen = full_path_tos(state->dir_fsp->fsp_name->base_name, + state->smb_fname->base_name, + pathbuf, + sizeof(pathbuf), + &path, + &tofree); + if (pathlen == -1) { + tevent_req_error(req, ENOMEM); + return; + } + + state->xattr_size = getxattr(path, + state->xattr_name, + state->xattr_value, + talloc_array_length(state->xattr_value)); + err = errno; + TALLOC_FREE(tofree); + if (state->xattr_size == -1) { + tevent_req_error(req, err); + return; + } + + tevent_req_done(req); + return; +} + +static void vfswrap_getxattrat_do_async(void *private_data) +{ + struct vfswrap_getxattrat_state *state = talloc_get_type_abort( + private_data, struct vfswrap_getxattrat_state); + struct timespec start_time; + struct timespec end_time; + int ret; + + PROFILE_TIMESTAMP(&start_time); + SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes); + + /* + * Here we simulate a getxattrat() + * call using fchdir();getxattr() + */ + + per_thread_cwd_activate(); + + /* Become the correct credential on this thread. */ + ret = set_thread_credentials(state->token->uid, + state->token->gid, + (size_t)state->token->ngroups, + state->token->groups); + if (ret != 0) { + state->xattr_size = -1; + state->vfs_aio_state.error = errno; + goto end_profile; + } + + ret = fchdir(state->dir_fsp->fh->fd); + if (ret == -1) { + state->xattr_size = -1; + state->vfs_aio_state.error = errno; + goto end_profile; + } + + state->xattr_size = getxattr(state->name, + state->xattr_name, + state->xattr_value, + talloc_array_length(state->xattr_value)); + if (state->xattr_size == -1) { + state->vfs_aio_state.error = errno; + } + +end_profile: + PROFILE_TIMESTAMP(&end_time); + state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time); + SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes); +} + +static void vfswrap_getxattrat_done(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data( + subreq, struct tevent_req); + struct vfswrap_getxattrat_state *state = tevent_req_data( + req, struct vfswrap_getxattrat_state); + int ret; + bool ok; + + /* + * Make sure we run as the user again + */ + ok = change_to_user_by_fsp(state->dir_fsp); + SMB_ASSERT(ok); + + ret = pthreadpool_tevent_job_recv(subreq); + TALLOC_FREE(subreq); + SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes); + talloc_set_destructor(state, NULL); + if (ret != 0) { + if (ret != EAGAIN) { + tevent_req_error(req, ret); + return; + } + /* + * If we get EAGAIN from pthreadpool_tevent_job_recv() this + * means the lower level pthreadpool failed to create a new + * thread. 
Fallback to sync processing in that case to allow + * some progress for the client. + */ + vfswrap_getxattrat_do_sync(req); + return; + } + + if (state->xattr_size == -1) { + tevent_req_error(req, state->vfs_aio_state.error); + return; + } + + if (state->xattr_value == NULL) { + /* + * The caller only wanted the size. + */ + tevent_req_done(req); + return; + } + + /* + * shrink the buffer to the returned size. + * (can't fail). It means NULL if size is 0. + */ + state->xattr_value = talloc_realloc(state, + state->xattr_value, + uint8_t, + state->xattr_size); + + tevent_req_done(req); +} + +static ssize_t vfswrap_getxattrat_recv(struct tevent_req *req, + struct vfs_aio_state *aio_state, + TALLOC_CTX *mem_ctx, + uint8_t **xattr_value) +{ + struct vfswrap_getxattrat_state *state = tevent_req_data( + req, struct vfswrap_getxattrat_state); + ssize_t xattr_size; + + if (tevent_req_is_unix_error(req, &aio_state->error)) { + tevent_req_received(req); + return -1; + } + + *aio_state = state->vfs_aio_state; + xattr_size = state->xattr_size; + if (xattr_value != NULL) { + *xattr_value = talloc_move(mem_ctx, &state->xattr_value); + } + + tevent_req_received(req); + return xattr_size; +} + static ssize_t vfswrap_fgetxattr(struct vfs_handle_struct *handle, struct files_struct *fsp, const char *name, void *value, size_t size) { return fgetxattr(fsp->fh->fd, name, value, size); @@ -3024,11 +3388,9 @@ static struct vfs_fn_pointers vfs_default_fns = { .open_fn = vfswrap_open, .create_file_fn = vfswrap_create_file, .close_fn = vfswrap_close, - .read_fn = vfswrap_read, .pread_fn = vfswrap_pread, .pread_send_fn = vfswrap_pread_send, .pread_recv_fn = vfswrap_pread_recv, - .write_fn = vfswrap_write, .pwrite_fn = vfswrap_pwrite, .pwrite_send_fn = vfswrap_pwrite_send, .pwrite_recv_fn = vfswrap_pwrite_recv, @@ -3036,7 +3398,6 @@ static struct vfs_fn_pointers vfs_default_fns = { .sendfile_fn = vfswrap_sendfile, .recvfile_fn = vfswrap_recvfile, .rename_fn = vfswrap_rename, - .fsync_fn = vfswrap_fsync, .fsync_send_fn = vfswrap_fsync_send, .fsync_recv_fn = vfswrap_fsync_recv, .stat_fn = vfswrap_stat, @@ -3077,6 +3438,8 @@ static struct vfs_fn_pointers vfs_default_fns = { .set_dos_attributes_fn = vfswrap_set_dos_attributes, .fset_dos_attributes_fn = vfswrap_fset_dos_attributes, .get_dos_attributes_fn = vfswrap_get_dos_attributes, + .get_dos_attributes_send_fn = vfswrap_get_dos_attributes_send, + .get_dos_attributes_recv_fn = vfswrap_get_dos_attributes_recv, .fget_dos_attributes_fn = vfswrap_fget_dos_attributes, .offload_read_send_fn = vfswrap_offload_read_send, .offload_read_recv_fn = vfswrap_offload_read_recv, @@ -3094,9 +3457,6 @@ static struct vfs_fn_pointers vfs_default_fns = { /* POSIX ACL operations. */ - .chmod_acl_fn = vfswrap_chmod_acl, - .fchmod_acl_fn = vfswrap_fchmod_acl, - .sys_acl_get_file_fn = vfswrap_sys_acl_get_file, .sys_acl_get_fd_fn = vfswrap_sys_acl_get_fd, .sys_acl_blob_get_file_fn = posix_sys_acl_blob_get_file, @@ -3107,6 +3467,8 @@ static struct vfs_fn_pointers vfs_default_fns = { /* EA operations. */ .getxattr_fn = vfswrap_getxattr, + .getxattrat_send_fn = vfswrap_getxattrat_send, + .getxattrat_recv_fn = vfswrap_getxattrat_recv, .fgetxattr_fn = vfswrap_fgetxattr, .listxattr_fn = vfswrap_listxattr, .flistxattr_fn = vfswrap_flistxattr, @@ -3127,6 +3489,12 @@ static struct vfs_fn_pointers vfs_default_fns = { static_decl_vfs; NTSTATUS vfs_default_init(TALLOC_CTX *ctx) { + /* + * Here we need to implement every call! + * + * As this is the end of the vfs module chain. 
+ */ + smb_vfs_assert_all_fns(&vfs_default_fns, DEFAULT_VFS_MODULE_NAME); return smb_register_vfs(SMB_VFS_INTERFACE_VERSION, DEFAULT_VFS_MODULE_NAME, &vfs_default_fns); }
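
Note on the async completion callbacks above (vfs_pread_done, vfs_pwrite_done,
vfs_fsync_done, vfswrap_getxattrat_done): they all follow the same pattern.
When pthreadpool_tevent_job_recv() returns EAGAIN, the threadpool could not
start a worker thread, so the request falls back to running the job
synchronously in the main process before completing. Below is a minimal sketch
of that shape, not code from the patch; my_job_state and my_job_do() are
hypothetical placeholders, while the tevent/pthreadpool_tevent calls are the
ones used in the hunks above.

static void my_job_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct my_job_state *state = tevent_req_data(
		req, struct my_job_state);
	int ret;

	/* Collect the result of the threadpool job. */
	ret = pthreadpool_tevent_job_recv(subreq);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		if (ret != EAGAIN) {
			/* Genuine failure: propagate the error. */
			tevent_req_error(req, ret);
			return;
		}
		/*
		 * EAGAIN means no worker thread could be created.
		 * Run the job function synchronously (hypothetical
		 * my_job_do()) so the client still makes progress.
		 */
		my_job_do(state);
	}

	tevent_req_done(req);
}

The same reasoning explains why vfswrap_getxattrat_done() calls
vfswrap_getxattrat_do_sync(req) and then returns instead of falling through:
that sync helper completes the request itself, so the callback must not call
tevent_req_done() a second time.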