*/
bool signaled;
} needs_fence;
+
+ bool per_thread_cwd;
};
static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool);
return pthreadpool_queued_jobs(pool->pool);
}
+bool pthreadpool_tevent_per_thread_cwd(struct pthreadpool_tevent *pool)
+{
+ if (pool->pool == NULL) {
+ return false;
+ }
+
+ return pthreadpool_per_thread_cwd(pool->pool);
+}
+
static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool)
{
struct pthreadpool_tevent_job *job = NULL;
return tevent_req_post(req, ev);
}
PTHREAD_TEVENT_JOB_THREAD_FENCE_INIT(job);
+ job->per_thread_cwd = pthreadpool_tevent_per_thread_cwd(pool);
talloc_set_destructor(job, pthreadpool_tevent_job_destructor);
DLIST_ADD_END(job->pool->jobs, job);
job->state = state;
return true;
}
+bool pthreadpool_tevent_current_job_per_thread_cwd(void)
+{
+ if (current_job == NULL) {
+ /*
+ * Should only be called from within
+ * the job function.
+ */
+ abort();
+ return false;
+ }
+
+ return current_job->per_thread_cwd;
+}
+
static void pthreadpool_tevent_job_fn(void *private_data)
{
struct pthreadpool_tevent_job *job =
size_t pthreadpool_tevent_max_threads(struct pthreadpool_tevent *pool);
size_t pthreadpool_tevent_queued_jobs(struct pthreadpool_tevent *pool);
+bool pthreadpool_tevent_per_thread_cwd(struct pthreadpool_tevent *pool);
/*
* return true - if tevent_req_cancel() was called.
*/
bool pthreadpool_tevent_current_job_continue(void);
+/*
+ * return true if the current job can rely on a per thread
+ * current working directory.
+ */
+bool pthreadpool_tevent_current_job_per_thread_cwd(void);
+
struct tevent_req *pthreadpool_tevent_job_send(
TALLOC_CTX *mem_ctx, struct tevent_context *ev,
struct pthreadpool_tevent *pool,