pthreadpool: add pthreadpool_tevent_[current_job_]per_thread_cwd()
author Stefan Metzmacher <metze@samba.org>
Thu, 21 Jun 2018 23:02:41 +0000 (01:02 +0200)
committer Ralph Boehme <slow@samba.org>
Tue, 24 Jul 2018 15:38:27 +0000 (17:38 +0200)
These can be used to check whether worker threads run with
unshare(CLONE_FS), i.e. with a per-thread current working directory.
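
A minimal caller-side sketch (illustrative only, not part of this change;
example_job_fn(), struct example_state, its dirname member and the include
path are hypothetical names) of how a job function could use the new
per-job helper:

#include <unistd.h>
#include "pthreadpool_tevent.h"

struct example_state {
	const char *dirname;
};

static void example_job_fn(void *private_data)
{
	struct example_state *state =
		(struct example_state *)private_data;

	if (pthreadpool_tevent_current_job_per_thread_cwd()) {
		/*
		 * This worker thread runs with unshare(CLONE_FS),
		 * so chdir() only affects this thread.
		 */
		if (chdir(state->dirname) != 0) {
			return;
		}
		/* ... work relative to the per-thread cwd ... */
		return;
	}

	/* ... otherwise work with full paths only ... */
}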

Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Ralph Boehme <slow@samba.org>
lib/pthreadpool/pthreadpool_tevent.c
lib/pthreadpool/pthreadpool_tevent.h

diff --git a/lib/pthreadpool/pthreadpool_tevent.c b/lib/pthreadpool/pthreadpool_tevent.c
index 94b6b9ded8ff5e5718c899ecfe0d5c157dda10d8..01e8586b384dac23f3c47c8b00f686b954747eca 100644
@@ -210,6 +210,8 @@ struct pthreadpool_tevent_job {
                 */
                bool signaled;
        } needs_fence;
+
+       bool per_thread_cwd;
 };
 
 static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool);
@@ -283,6 +285,15 @@ size_t pthreadpool_tevent_queued_jobs(struct pthreadpool_tevent *pool)
        return pthreadpool_queued_jobs(pool->pool);
 }
 
+bool pthreadpool_tevent_per_thread_cwd(struct pthreadpool_tevent *pool)
+{
+       if (pool->pool == NULL) {
+               return false;
+       }
+
+       return pthreadpool_per_thread_cwd(pool->pool);
+}
+
 static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool)
 {
        struct pthreadpool_tevent_job *job = NULL;
@@ -701,6 +712,7 @@ struct tevent_req *pthreadpool_tevent_job_send(
                return tevent_req_post(req, ev);
        }
        PTHREAD_TEVENT_JOB_THREAD_FENCE_INIT(job);
+       job->per_thread_cwd = pthreadpool_tevent_per_thread_cwd(pool);
        talloc_set_destructor(job, pthreadpool_tevent_job_destructor);
        DLIST_ADD_END(job->pool->jobs, job);
        job->state = state;
@@ -772,6 +784,20 @@ bool pthreadpool_tevent_current_job_continue(void)
        return true;
 }
 
+bool pthreadpool_tevent_current_job_per_thread_cwd(void)
+{
+       if (current_job == NULL) {
+               /*
+                * Should only be called from within
+                * the job function.
+                */
+               abort();
+               return false;
+       }
+
+       return current_job->per_thread_cwd;
+}
+
 static void pthreadpool_tevent_job_fn(void *private_data)
 {
        struct pthreadpool_tevent_job *job =
diff --git a/lib/pthreadpool/pthreadpool_tevent.h b/lib/pthreadpool/pthreadpool_tevent.h
index 37e491e17c47ab8f8ebb4e69e0461ef7b9176619..ff2ab7cfb73d60ac9503cbe356f2ea38da21a769 100644
@@ -31,6 +31,7 @@ int pthreadpool_tevent_init(TALLOC_CTX *mem_ctx, unsigned max_threads,
 
 size_t pthreadpool_tevent_max_threads(struct pthreadpool_tevent *pool);
 size_t pthreadpool_tevent_queued_jobs(struct pthreadpool_tevent *pool);
+bool pthreadpool_tevent_per_thread_cwd(struct pthreadpool_tevent *pool);
 
 /*
  * return true - if tevent_req_cancel() was called.
@@ -46,6 +47,12 @@ bool pthreadpool_tevent_current_job_orphaned(void);
  */
 bool pthreadpool_tevent_current_job_continue(void);
 
+/*
+ * return true if the current job can rely on a per thread
+ * current working directory.
+ */
+bool pthreadpool_tevent_current_job_per_thread_cwd(void);
+
 struct tevent_req *pthreadpool_tevent_job_send(
        TALLOC_CTX *mem_ctx, struct tevent_context *ev,
        struct pthreadpool_tevent *pool,
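
A similar caller-side sketch (assumed usage, not part of this change;
example_pool_allows_chdir_in_jobs() and the include path are hypothetical)
for the new pool-level helper:

#include <stdbool.h>
#include "pthreadpool_tevent.h"

static bool example_pool_allows_chdir_in_jobs(struct pthreadpool_tevent *pool)
{
	/*
	 * False if the underlying pool is already gone; otherwise this
	 * reports whether the pool's worker threads run with
	 * unshare(CLONE_FS). Jobs must not rely on chdir() unless this
	 * returns true.
	 */
	return pthreadpool_tevent_per_thread_cwd(pool);
}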