tevent: Save 140 bytes of .text in tevent_req_create
[metze/samba/wip.git] / lib/tevent/tevent_req.c
/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"

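/*
 * Default implementation of tevent_req_print(): render the request
 * pointer, creation location, state, error code, private data type and
 * pending timer into a debug string.
 */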
char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
        return talloc_asprintf(mem_ctx,
                               "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
                               " state[%s (%p)] timer[%p]",
                               req, req->internal.create_location,
                               req->internal.state,
                               (unsigned long long)req->internal.error,
                               (unsigned long long)req->internal.error,
                               talloc_get_name(req->data),
                               req->data,
                               req->internal.timer
                               );
}

char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req)
{
        if (!req->private_print) {
                return tevent_req_default_print(req, mem_ctx);
        }

        return req->private_print(req, mem_ctx);
}

static int tevent_req_destructor(struct tevent_req *req);

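/*
 * The request, its immediate trigger and the caller's private state of
 * data_size bytes are carved out of a single talloc pool (two child
 * allocations, sizeof(struct tevent_immediate) + data_size bytes), so
 * once the pool has been allocated the child allocations below cannot
 * fail.
 */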
struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
                                      void *pdata,
                                      size_t data_size,
                                      const char *type,
                                      const char *location)
{
        struct tevent_req *req;
        void **ppdata = (void **)pdata;
        void *data;

        req = talloc_pooled_object(
                mem_ctx, struct tevent_req, 2,
                sizeof(struct tevent_immediate) + data_size);
        if (req == NULL) {
                return NULL;
        }

        *req = (struct tevent_req) {
                .internal.private_type          = type,
                .internal.create_location       = location,
                .internal.state                 = TEVENT_REQ_IN_PROGRESS,
                .internal.trigger               = tevent_create_immediate(req)
        };

        data = talloc_zero_size(req, data_size);

        /*
         * No need to check for req->internal.trigger!=NULL or
         * data!=NULL, this can't fail: talloc_pooled_object has
         * already allocated sufficient memory.
         */

        talloc_set_name_const(data, type);

        req->data = data;

        talloc_set_destructor(req, tevent_req_destructor);

        *ppdata = data;
        return req;
}

static int tevent_req_destructor(struct tevent_req *req)
{
        tevent_req_received(req);
        return 0;
}

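/*
 * Notify the caller that the request has finished.  With
 * tevent_req_defer_callback() in effect, the callback is not invoked
 * directly but re-posted as an immediate event on the stored event
 * context.
 */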
void _tevent_req_notify_callback(struct tevent_req *req, const char *location)
{
        req->internal.finish_location = location;
        if (req->internal.defer_callback_ev) {
                (void)tevent_req_post(req, req->internal.defer_callback_ev);
                req->internal.defer_callback_ev = NULL;
                return;
        }
        if (req->async.fn != NULL) {
                req->async.fn(req);
        }
}

static void tevent_req_cleanup(struct tevent_req *req)
{
        if (req->private_cleanup.fn == NULL) {
                return;
        }

        if (req->private_cleanup.state >= req->internal.state) {
                /*
                 * Don't call the cleanup_function multiple times for the same
                 * state recursively
                 */
                return;
        }

        req->private_cleanup.state = req->internal.state;
        req->private_cleanup.fn(req, req->internal.state);
}

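/*
 * Move the request into its final state: cancel any pending timeout,
 * record state and finish location, run the cleanup hook and notify
 * the caller's callback.
 */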
static void tevent_req_finish(struct tevent_req *req,
                              enum tevent_req_state state,
                              const char *location)
{
        /*
         * make sure we do not timeout after
         * the request was already finished
         */
        TALLOC_FREE(req->internal.timer);

        req->internal.state = state;
        req->internal.finish_location = location;

        tevent_req_cleanup(req);

        _tevent_req_notify_callback(req, location);
}

void _tevent_req_done(struct tevent_req *req,
                      const char *location)
{
        tevent_req_finish(req, TEVENT_REQ_DONE, location);
}

bool _tevent_req_error(struct tevent_req *req,
                       uint64_t error,
                       const char *location)
{
        if (error == 0) {
                return false;
        }

        req->internal.error = error;
        tevent_req_finish(req, TEVENT_REQ_USER_ERROR, location);
        return true;
}

void _tevent_req_oom(struct tevent_req *req, const char *location)
{
        tevent_req_finish(req, TEVENT_REQ_NO_MEMORY, location);
}

bool _tevent_req_nomem(const void *p,
                       struct tevent_req *req,
                       const char *location)
{
        if (p != NULL) {
                return false;
        }
        _tevent_req_oom(req, location);
        return true;
}

/**
 * @internal
 *
 * @brief Immediate event callback.
 *
 * @param[in]  ev           The event context to use.
 *
 * @param[in]  im           The immediate event.
 *
 * @param[in]  private_data The async request to be finished.
 */
static void tevent_req_trigger(struct tevent_context *ev,
                               struct tevent_immediate *im,
                               void *private_data)
{
        struct tevent_req *req =
                talloc_get_type_abort(private_data,
                struct tevent_req);

        tevent_req_finish(req, req->internal.state,
                          req->internal.finish_location);
}

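/*
 * Schedule the finish notification as an immediate event on the given
 * event context, so a _send function that completed synchronously
 * still delivers its callback from the event loop.
 */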
struct tevent_req *tevent_req_post(struct tevent_req *req,
                                   struct tevent_context *ev)
{
        tevent_schedule_immediate(req->internal.trigger,
                                  ev, tevent_req_trigger, req);
        return req;
}

void tevent_req_defer_callback(struct tevent_req *req,
                               struct tevent_context *ev)
{
        req->internal.defer_callback_ev = ev;
}

bool tevent_req_is_in_progress(struct tevent_req *req)
{
        if (req->internal.state == TEVENT_REQ_IN_PROGRESS) {
                return true;
        }

        return false;
}

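/*
 * Release the request's resources once the result has been received
 * (typically from the _recv function): drop the destructor, free the
 * trigger, timer and private data, and mark the request as
 * TEVENT_REQ_RECEIVED.
 */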
void tevent_req_received(struct tevent_req *req)
{
        talloc_set_destructor(req, NULL);

        req->private_print = NULL;
        req->private_cancel = NULL;

        TALLOC_FREE(req->internal.trigger);
        TALLOC_FREE(req->internal.timer);

        req->internal.state = TEVENT_REQ_RECEIVED;

        tevent_req_cleanup(req);

        TALLOC_FREE(req->data);
}

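/*
 * Run the event loop until the request has finished.  Returns false if
 * tevent_loop_once() fails before that happens.
 */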
bool tevent_req_poll(struct tevent_req *req,
                     struct tevent_context *ev)
{
        while (tevent_req_is_in_progress(req)) {
                int ret;

                ret = tevent_loop_once(ev);
                if (ret != 0) {
                        return false;
                }
        }

        return true;
}

bool tevent_req_is_error(struct tevent_req *req, enum tevent_req_state *state,
                         uint64_t *error)
{
        if (req->internal.state == TEVENT_REQ_DONE) {
                return false;
        }
        if (req->internal.state == TEVENT_REQ_USER_ERROR) {
                *error = req->internal.error;
        }
        *state = req->internal.state;
        return true;
}

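/**
 * @internal
 *
 * @brief Timer event callback that finishes a request as timed out.
 *
 * @param[in]  ev           The event context to use.
 *
 * @param[in]  te           The timer event.
 *
 * @param[in]  now          The current time.
 *
 * @param[in]  private_data The async request that reached its end time.
 */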
static void tevent_req_timedout(struct tevent_context *ev,
                                struct tevent_timer *te,
                                struct timeval now,
                                void *private_data)
{
        struct tevent_req *req =
                talloc_get_type_abort(private_data,
                struct tevent_req);

        TALLOC_FREE(req->internal.timer);

        tevent_req_finish(req, TEVENT_REQ_TIMED_OUT, __FUNCTION__);
}

bool tevent_req_set_endtime(struct tevent_req *req,
                            struct tevent_context *ev,
                            struct timeval endtime)
{
        TALLOC_FREE(req->internal.timer);

        req->internal.timer = tevent_add_timer(ev, req, endtime,
                                               tevent_req_timedout,
                                               req);
        if (tevent_req_nomem(req->internal.timer, req)) {
                return false;
        }

        return true;
}

void tevent_req_set_callback(struct tevent_req *req, tevent_req_fn fn, void *pvt)
{
        req->async.fn = fn;
        req->async.private_data = pvt;
}

void *_tevent_req_callback_data(struct tevent_req *req)
{
        return req->async.private_data;
}

void *_tevent_req_data(struct tevent_req *req)
{
        return req->data;
}

void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn)
{
        req->private_print = fn;
}

void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn)
{
        req->private_cancel = fn;
}

bool _tevent_req_cancel(struct tevent_req *req, const char *location)
{
        if (req->private_cancel == NULL) {
                return false;
        }

        return req->private_cancel(req);
}

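/*
 * Register a cleanup hook.  private_cleanup.state is seeded with the
 * current request state, so tevent_req_cleanup() only invokes the hook
 * once the state actually advances.
 */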
void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn)
{
        req->private_cleanup.state = req->internal.state;
        req->private_cleanup.fn = fn;
}