/*
   Unix SMB/CIFS implementation.

   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
27 #include "tevent_internal.h"
28 #include "tevent_util.h"
30 char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
32 return talloc_asprintf(mem_ctx,
33 "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
34 " state[%s (%p)] timer[%p]",
35 req, req->internal.create_location,
37 (unsigned long long)req->internal.error,
38 (unsigned long long)req->internal.error,
39 talloc_get_name(req->data),
45 char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req)
47 if (!req->private_print) {
48 return tevent_req_default_print(req, mem_ctx);
51 return req->private_print(req, mem_ctx);
54 static int tevent_req_destructor(struct tevent_req *req);
56 struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
62 struct tevent_req *req;
63 void **ppdata = (void **)pdata;
66 req = talloc_pooled_object(
67 mem_ctx, struct tevent_req, 2,
68 sizeof(struct tevent_immediate) + data_size);
73 *req = (struct tevent_req) {
74 .internal.private_type = type,
75 .internal.create_location = location,
76 .internal.state = TEVENT_REQ_IN_PROGRESS,
77 .internal.trigger = tevent_create_immediate(req)
80 data = talloc_zero_size(req, data_size);
83 * No need to check for req->internal.trigger!=NULL or
84 * data!=NULL, this can't fail: talloc_pooled_object has
85 * already allocated sufficient memory.
88 talloc_set_name_const(data, type);
92 talloc_set_destructor(req, tevent_req_destructor);
/*
 * Talloc destructor: if the request is freed without the caller
 * calling the _recv function, do the same bookkeeping here.
 *
 * Fix: a talloc destructor must return int (0 = proceed with free);
 * the missing "return 0;" is restored.
 */
static int tevent_req_destructor(struct tevent_req *req)
{
	tevent_req_received(req);
	return 0;
}
104 void _tevent_req_notify_callback(struct tevent_req *req, const char *location)
106 req->internal.finish_location = location;
107 if (req->internal.defer_callback_ev) {
108 (void)tevent_req_post(req, req->internal.defer_callback_ev);
109 req->internal.defer_callback_ev = NULL;
112 if (req->async.fn != NULL) {
117 static void tevent_req_cleanup(struct tevent_req *req)
119 if (req->private_cleanup.fn == NULL) {
123 if (req->private_cleanup.state >= req->internal.state) {
125 * Don't call the cleanup_function multiple times for the same
131 req->private_cleanup.state = req->internal.state;
132 req->private_cleanup.fn(req, req->internal.state);
135 static void tevent_req_finish(struct tevent_req *req,
136 enum tevent_req_state state,
137 const char *location)
140 * make sure we do not timeout after
141 * the request was already finished
143 TALLOC_FREE(req->internal.timer);
145 req->internal.state = state;
146 req->internal.finish_location = location;
148 tevent_req_cleanup(req);
150 _tevent_req_notify_callback(req, location);
153 void _tevent_req_done(struct tevent_req *req,
154 const char *location)
156 tevent_req_finish(req, TEVENT_REQ_DONE, location);
159 bool _tevent_req_error(struct tevent_req *req,
161 const char *location)
167 req->internal.error = error;
168 tevent_req_finish(req, TEVENT_REQ_USER_ERROR, location);
172 void _tevent_req_oom(struct tevent_req *req, const char *location)
174 tevent_req_finish(req, TEVENT_REQ_NO_MEMORY, location);
177 bool _tevent_req_nomem(const void *p,
178 struct tevent_req *req,
179 const char *location)
184 _tevent_req_oom(req, location);
191 * @brief Immediate event callback.
193 * @param[in] ev The event context to use.
195 * @param[in] im The immediate event.
197 * @param[in] priv The async request to be finished.
199 static void tevent_req_trigger(struct tevent_context *ev,
200 struct tevent_immediate *im,
203 struct tevent_req *req =
204 talloc_get_type_abort(private_data,
207 tevent_req_finish(req, req->internal.state,
208 req->internal.finish_location);
211 struct tevent_req *tevent_req_post(struct tevent_req *req,
212 struct tevent_context *ev)
214 tevent_schedule_immediate(req->internal.trigger,
215 ev, tevent_req_trigger, req);
219 void tevent_req_defer_callback(struct tevent_req *req,
220 struct tevent_context *ev)
222 req->internal.defer_callback_ev = ev;
225 bool tevent_req_is_in_progress(struct tevent_req *req)
227 if (req->internal.state == TEVENT_REQ_IN_PROGRESS) {
234 void tevent_req_received(struct tevent_req *req)
236 talloc_set_destructor(req, NULL);
238 req->private_print = NULL;
239 req->private_cancel = NULL;
241 TALLOC_FREE(req->internal.trigger);
242 TALLOC_FREE(req->internal.timer);
244 req->internal.state = TEVENT_REQ_RECEIVED;
246 tevent_req_cleanup(req);
248 TALLOC_FREE(req->data);
/*
 * Drive the event loop until the request has finished.
 *
 * @return true when the request left the IN_PROGRESS state, false if
 *         tevent_loop_once() failed.
 *
 * Fix: restore the error check on tevent_loop_once() and both return
 * statements, which had been dropped from this copy.
 */
bool tevent_req_poll(struct tevent_req *req,
		     struct tevent_context *ev)
{
	while (tevent_req_is_in_progress(req)) {
		int ret;

		ret = tevent_loop_once(ev);
		if (ret != 0) {
			return false;
		}
	}

	return true;
}
266 bool tevent_req_is_error(struct tevent_req *req, enum tevent_req_state *state,
269 if (req->internal.state == TEVENT_REQ_DONE) {
272 if (req->internal.state == TEVENT_REQ_USER_ERROR) {
273 *error = req->internal.error;
275 *state = req->internal.state;
279 static void tevent_req_timedout(struct tevent_context *ev,
280 struct tevent_timer *te,
284 struct tevent_req *req =
285 talloc_get_type_abort(private_data,
288 TALLOC_FREE(req->internal.timer);
290 tevent_req_finish(req, TEVENT_REQ_TIMED_OUT, __FUNCTION__);
293 bool tevent_req_set_endtime(struct tevent_req *req,
294 struct tevent_context *ev,
295 struct timeval endtime)
297 TALLOC_FREE(req->internal.timer);
299 req->internal.timer = tevent_add_timer(ev, req, endtime,
302 if (tevent_req_nomem(req->internal.timer, req)) {
309 void tevent_req_set_callback(struct tevent_req *req, tevent_req_fn fn, void *pvt)
312 req->async.private_data = pvt;
315 void *_tevent_req_callback_data(struct tevent_req *req)
317 return req->async.private_data;
320 void *_tevent_req_data(struct tevent_req *req)
325 void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn)
327 req->private_print = fn;
330 void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn)
332 req->private_cancel = fn;
335 bool _tevent_req_cancel(struct tevent_req *req, const char *location)
337 if (req->private_cancel == NULL) {
341 return req->private_cancel(req);
344 void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn)
346 req->private_cleanup.state = req->internal.state;
347 req->private_cleanup.fn = fn;