tstream_readline...
[metze/samba/wip.git] / lib / tsocket / tsocket_helpers.c
index 1876dc3882f75f67d173dcf9b7fadfd049f3f7ea..575efc96074702cbb5c7c3895328c99b8c8ea61d 100644 (file)
@@ -3,7 +3,7 @@
 
    Copyright (C) Stefan Metzmacher 2009
 
-     ** NOTE! The following LGPL license applies to the tevent
+     ** NOTE! The following LGPL license applies to the tsocket
      ** library. This does NOT imply that all of Samba is released
      ** under the LGPL
 
@@ -22,6 +22,7 @@
 */
 
 #include "replace.h"
+#include "system/filesys.h"
 #include "tsocket.h"
 #include "tsocket_internal.h"
 
@@ -41,24 +42,6 @@ static void tdgram_sendto_queue_trigger(struct tevent_req *req,
                                         void *private_data);
 static void tdgram_sendto_queue_done(struct tevent_req *subreq);
 
-/**
- * @brief Queue a dgram blob for sending through the socket
- * @param[in] mem_ctx  The memory context for the result
- * @param[in] ev       The event context the operation should work on
- * @param[in] dgram    The tdgram_context to send the message buffer
- * @param[in] queue    The existing dgram queue
- * @param[in] buf      The message buffer
- * @param[in] len      The message length
- * @param[in] dst      The destination socket address
- * @retval             The async request handle
- *
- * This function queues a blob for sending to destination through an existing
- * dgram socket. The async callback is triggered when the whole blob is
- * delivered to the underlying system socket.
- *
- * The caller needs to make sure that all non-scalar input parameters hang
- * arround for the whole lifetime of the request.
- */
 struct tevent_req *tdgram_sendto_queue_send(TALLOC_CTX *mem_ctx,
                                            struct tevent_context *ev,
                                            struct tdgram_context *dgram,
@@ -69,7 +52,7 @@ struct tevent_req *tdgram_sendto_queue_send(TALLOC_CTX *mem_ctx,
 {
        struct tevent_req *req;
        struct tdgram_sendto_queue_state *state;
-       bool ok;
+       struct tevent_queue_entry *e;
 
        req = tevent_req_create(mem_ctx, &state,
                                struct tdgram_sendto_queue_state);
@@ -84,21 +67,24 @@ struct tevent_req *tdgram_sendto_queue_send(TALLOC_CTX *mem_ctx,
        state->caller.dst       = dst;
        state->ret              = -1;
 
-       ok = tevent_queue_add(queue,
-                             ev,
-                             req,
-                             tdgram_sendto_queue_trigger,
-                             NULL);
-       if (!ok) {
-               tevent_req_nomem(NULL, req);
-               goto post;
+       /*
+        * we use tevent_queue_add_optimize_empty() with allow_direct
+        * in order to optimize for the empty queue case.
+        */
+       e = tevent_queue_add_optimize_empty(
+                               queue,
+                               ev,
+                               req,
+                               tdgram_sendto_queue_trigger,
+                               NULL);
+       if (tevent_req_nomem(e, req)) {
+               return tevent_req_post(req, ev);
+       }
+       if (!tevent_req_is_in_progress(req)) {
+               return tevent_req_post(req, ev);
        }
 
        return req;
-
- post:
-       tevent_req_post(req, ev);
-       return req;
 }
 
 static void tdgram_sendto_queue_trigger(struct tevent_req *req,
@@ -155,3 +141,619 @@ ssize_t tdgram_sendto_queue_recv(struct tevent_req *req, int *perrno)
        return ret;
 }
 
+/*
+ * Async state for tstream_readv_pdu_send()/tstream_readv_pdu_recv():
+ * read a full pdu by repeatedly asking the caller for the next
+ * vector to fill.
+ */
+struct tstream_readv_pdu_state {
+       /* these members are owned by the caller and must stay
+        * valid for the whole lifetime of the request */
+       struct {
+               struct tevent_context *ev;
+               struct tstream_context *stream;
+               tstream_readv_pdu_next_vector_t next_vector_fn;
+               void *next_vector_private;
+       } caller;
+
+       /*
+        * Each call to the callback resets iov and count.
+        * The callback allocates the iov as a child of our state,
+        * which means we are allowed to modify and free it.
+        *
+        * We call the callback every time we have filled the given
+        * vector and ask for a new vector. We return once the callback
+        * asks for 0 bytes.
+        */
+       struct iovec *vector;
+       size_t count;
+
+       /*
+        * the total number of bytes we read,
+        * the return value of the _recv function
+        */
+       int total_read;
+};
+
+static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req);
+static void tstream_readv_pdu_readv_done(struct tevent_req *subreq);
+
+/*
+ * Start an async pdu read: the next_vector_fn callback supplies the
+ * iovec(s) to fill; the request completes once it asks for 0 bytes.
+ */
+struct tevent_req *tstream_readv_pdu_send(TALLOC_CTX *mem_ctx,
+                               struct tevent_context *ev,
+                               struct tstream_context *stream,
+                               tstream_readv_pdu_next_vector_t next_vector_fn,
+                               void *next_vector_private)
+{
+       struct tstream_readv_pdu_state *state;
+       struct tevent_req *req;
+
+       req = tevent_req_create(mem_ctx, &state,
+                               struct tstream_readv_pdu_state);
+       if (req == NULL) {
+               return NULL;
+       }
+
+       state->caller.ev = ev;
+       state->caller.stream = stream;
+       state->caller.next_vector_fn = next_vector_fn;
+       state->caller.next_vector_private = next_vector_private;
+       state->vector = NULL;
+       state->count = 0;
+       state->total_read = 0;
+
+       /* ask for the first vector; this may finish the request directly */
+       tstream_readv_pdu_ask_for_next_vector(req);
+       if (tevent_req_is_in_progress(req)) {
+               return req;
+       }
+
+       return tevent_req_post(req, ev);
+}
+
+/*
+ * Ask the caller supplied next_vector_fn for the next vector to fill
+ * and submit a tstream_readv request for it.
+ *
+ * Finishes the request with tevent_req_done() once the callback asks
+ * for 0 bytes, with tevent_req_error(EMSGSIZE) on byte-count overflow,
+ * with EINVAL if the callback returned count > 0 but a total length of
+ * 0 bytes, or with the callback's errno if the callback returns -1.
+ */
+static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req)
+{
+       struct tstream_readv_pdu_state *state = tevent_req_data(req,
+                                           struct tstream_readv_pdu_state);
+       int ret;
+       size_t to_read = 0;
+       size_t i;
+       struct tevent_req *subreq;
+       bool optimize = false;
+       bool save_optimize = false;
+
+       if (state->count > 0) {
+               /*
+                * This is not the first time we asked for a vector,
+                * which means parts of the pdu already arrived.
+                *
+                * In this case it makes sense to enable
+                * a syscall/performance optimization if the
+                * low level tstream implementation supports it.
+                */
+               optimize = true;
+       }
+
+       /* the previous vector was allocated as a child of state; drop it */
+       TALLOC_FREE(state->vector);
+       state->count = 0;
+
+       ret = state->caller.next_vector_fn(state->caller.stream,
+                                          state->caller.next_vector_private,
+                                          state, &state->vector, &state->count);
+       if (ret == -1) {
+               tevent_req_error(req, errno);
+               return;
+       }
+
+       if (state->count == 0) {
+               /* the callback asked for 0 bytes: the pdu is complete */
+               tevent_req_done(req);
+               return;
+       }
+
+       /* sum up the vector lengths, detecting size_t overflow */
+       for (i=0; i < state->count; i++) {
+               size_t tmp = to_read;
+               tmp += state->vector[i].iov_len;
+
+               if (tmp < to_read) {
+                       tevent_req_error(req, EMSGSIZE);
+                       return;
+               }
+
+               to_read = tmp;
+       }
+
+       /*
+        * this is invalid; the next vector function should have
+        * reported count == 0.
+        */
+       if (to_read == 0) {
+               tevent_req_error(req, EINVAL);
+               return;
+       }
+
+       /* make sure total_read (an int) will not overflow */
+       if (state->total_read + to_read < state->total_read) {
+               tevent_req_error(req, EMSGSIZE);
+               return;
+       }
+
+       if (optimize) {
+               /*
+                * If the low level stream is a bsd socket
+                * we will get syscall optimization.
+                *
+                * If it is not a bsd socket
+                * tstream_bsd_optimize_readv() just returns.
+                */
+               save_optimize = tstream_bsd_optimize_readv(state->caller.stream,
+                                                          true);
+       }
+       subreq = tstream_readv_send(state,
+                                   state->caller.ev,
+                                   state->caller.stream,
+                                   state->vector,
+                                   state->count);
+       if (optimize) {
+               /* restore the previous optimization setting */
+               tstream_bsd_optimize_readv(state->caller.stream,
+                                          save_optimize);
+       }
+       if (tevent_req_nomem(subreq, req)) {
+               return;
+       }
+       tevent_req_set_callback(subreq, tstream_readv_pdu_readv_done, req);
+}
+
+/*
+ * One readv subrequest finished: collect its result, account the
+ * bytes and ask the callback for the next vector to fill.
+ */
+static void tstream_readv_pdu_readv_done(struct tevent_req *subreq)
+{
+       struct tevent_req *req = tevent_req_callback_data(subreq,
+                                struct tevent_req);
+       struct tstream_readv_pdu_state *state = tevent_req_data(req,
+                                           struct tstream_readv_pdu_state);
+       int ret;
+       int sys_errno;
+
+       ret = tstream_readv_recv(subreq, &sys_errno);
+       /* free the subrequest, as the other *_done handlers do */
+       TALLOC_FREE(subreq);
+       if (ret == -1) {
+               tevent_req_error(req, sys_errno);
+               return;
+       }
+
+       state->total_read += ret;
+
+       /* ask the callback for a new vector we should fill */
+       tstream_readv_pdu_ask_for_next_vector(req);
+}
+
+/*
+ * Collect the result of tstream_readv_pdu_send().
+ *
+ * Returns the total number of bytes read on success, or -1 with
+ * *perrno set on failure. The request is destroyed either way.
+ */
+int tstream_readv_pdu_recv(struct tevent_req *req, int *perrno)
+{
+       struct tstream_readv_pdu_state *state = tevent_req_data(req,
+                                           struct tstream_readv_pdu_state);
+       int ret = tsocket_simple_int_recv(req, perrno);
+
+       if (ret == 0) {
+               /* success: report the size of the whole pdu */
+               ret = state->total_read;
+       }
+
+       tevent_req_received(req);
+       return ret;
+}
+
+/* async state for tstream_readv_pdu_queue_send()/_recv() */
+struct tstream_readv_pdu_queue_state {
+       /* these members are owned by the caller */
+       struct {
+               struct tevent_context *ev;
+               struct tstream_context *stream;
+               tstream_readv_pdu_next_vector_t next_vector_fn;
+               void *next_vector_private;
+       } caller;
+       /* pdu size returned by the subrequest; -1 until it completed */
+       int ret;
+};
+
+static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
+                                        void *private_data);
+static void tstream_readv_pdu_queue_done(struct tevent_req *subreq);
+
+/*
+ * Like tstream_readv_pdu_send(), but serialized through a tevent_queue
+ * so multiple pdu reads on the same stream do not interleave.
+ */
+struct tevent_req *tstream_readv_pdu_queue_send(TALLOC_CTX *mem_ctx,
+                               struct tevent_context *ev,
+                               struct tstream_context *stream,
+                               struct tevent_queue *queue,
+                               tstream_readv_pdu_next_vector_t next_vector_fn,
+                               void *next_vector_private)
+{
+       struct tstream_readv_pdu_queue_state *state;
+       struct tevent_queue_entry *e;
+       struct tevent_req *req;
+
+       req = tevent_req_create(mem_ctx, &state,
+                               struct tstream_readv_pdu_queue_state);
+       if (req == NULL) {
+               return NULL;
+       }
+
+       state->caller.ev = ev;
+       state->caller.stream = stream;
+       state->caller.next_vector_fn = next_vector_fn;
+       state->caller.next_vector_private = next_vector_private;
+       state->ret = -1;
+
+       /*
+        * tevent_queue_add_optimize_empty() may run the trigger
+        * directly when the queue is empty, avoiding an extra
+        * event loop round trip.
+        */
+       e = tevent_queue_add_optimize_empty(queue, ev, req,
+                                           tstream_readv_pdu_queue_trigger,
+                                           NULL);
+       if (tevent_req_nomem(e, req)) {
+               return tevent_req_post(req, ev);
+       }
+       if (tevent_req_is_in_progress(req)) {
+               return req;
+       }
+
+       return tevent_req_post(req, ev);
+}
+
+/* called when it is our turn in the queue: start the actual pdu read */
+static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
+                                        void *private_data)
+{
+       struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
+                                       struct tstream_readv_pdu_queue_state);
+       struct tevent_req *subreq = NULL;
+
+       subreq = tstream_readv_pdu_send(state,
+                                       state->caller.ev,
+                                       state->caller.stream,
+                                       state->caller.next_vector_fn,
+                                       state->caller.next_vector_private);
+       if (tevent_req_nomem(subreq, req)) {
+               return;
+       }
+       tevent_req_set_callback(subreq, tstream_readv_pdu_queue_done, req);
+}
+
+/* the queued pdu read finished: pass its result up to our caller */
+static void tstream_readv_pdu_queue_done(struct tevent_req *subreq)
+{
+       struct tevent_req *req = tevent_req_callback_data(subreq,
+                                struct tevent_req);
+       struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
+                                       struct tstream_readv_pdu_queue_state);
+       int sys_errno;
+       int ret;
+
+       ret = tstream_readv_pdu_recv(subreq, &sys_errno);
+       TALLOC_FREE(subreq);
+       if (ret == -1) {
+               tevent_req_error(req, sys_errno);
+               return;
+       }
+
+       /* remember the pdu size for our own _recv function */
+       state->ret = ret;
+       tevent_req_done(req);
+}
+
+/*
+ * Collect the result of tstream_readv_pdu_queue_send().
+ *
+ * Returns the pdu size on success, or -1 with *perrno set on failure.
+ * The request is destroyed either way.
+ */
+int tstream_readv_pdu_queue_recv(struct tevent_req *req, int *perrno)
+{
+       struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
+                                       struct tstream_readv_pdu_queue_state);
+       int ret = tsocket_simple_int_recv(req, perrno);
+
+       if (ret == 0) {
+               ret = state->ret;
+       }
+
+       tevent_req_received(req);
+       return ret;
+}
+
+/* async state for tstream_writev_queue_send()/_recv() */
+struct tstream_writev_queue_state {
+       /* these members are owned by the caller and must stay
+        * valid for the whole lifetime of the request */
+       struct {
+               struct tevent_context *ev;
+               struct tstream_context *stream;
+               const struct iovec *vector;
+               size_t count;
+       } caller;
+       /* bytes written by the subrequest; -1 until it completed */
+       int ret;
+};
+
+static void tstream_writev_queue_trigger(struct tevent_req *req,
+                                        void *private_data);
+static void tstream_writev_queue_done(struct tevent_req *subreq);
+
+/*
+ * Like tstream_writev_send(), but serialized through a tevent_queue
+ * so concurrent writers on the same stream do not interleave.
+ */
+struct tevent_req *tstream_writev_queue_send(TALLOC_CTX *mem_ctx,
+                                            struct tevent_context *ev,
+                                            struct tstream_context *stream,
+                                            struct tevent_queue *queue,
+                                            const struct iovec *vector,
+                                            size_t count)
+{
+       struct tstream_writev_queue_state *state;
+       struct tevent_queue_entry *e;
+       struct tevent_req *req;
+
+       req = tevent_req_create(mem_ctx, &state,
+                               struct tstream_writev_queue_state);
+       if (req == NULL) {
+               return NULL;
+       }
+
+       state->caller.ev = ev;
+       state->caller.stream = stream;
+       state->caller.vector = vector;
+       state->caller.count = count;
+       state->ret = -1;
+
+       /*
+        * tevent_queue_add_optimize_empty() may run the trigger
+        * directly when the queue is empty, avoiding an extra
+        * event loop round trip.
+        */
+       e = tevent_queue_add_optimize_empty(queue, ev, req,
+                                           tstream_writev_queue_trigger,
+                                           NULL);
+       if (tevent_req_nomem(e, req)) {
+               return tevent_req_post(req, ev);
+       }
+       if (tevent_req_is_in_progress(req)) {
+               return req;
+       }
+
+       return tevent_req_post(req, ev);
+}
+
+/* called when it is our turn in the queue: start the actual writev */
+static void tstream_writev_queue_trigger(struct tevent_req *req,
+                                        void *private_data)
+{
+       struct tstream_writev_queue_state *state = tevent_req_data(req,
+                                       struct tstream_writev_queue_state);
+       struct tevent_req *subreq = NULL;
+
+       subreq = tstream_writev_send(state,
+                                    state->caller.ev,
+                                    state->caller.stream,
+                                    state->caller.vector,
+                                    state->caller.count);
+       if (tevent_req_nomem(subreq, req)) {
+               return;
+       }
+       tevent_req_set_callback(subreq, tstream_writev_queue_done, req);
+}
+
+/* the queued writev finished: pass its result up to our caller */
+static void tstream_writev_queue_done(struct tevent_req *subreq)
+{
+       struct tevent_req *req = tevent_req_callback_data(subreq,
+                                struct tevent_req);
+       struct tstream_writev_queue_state *state = tevent_req_data(req,
+                                       struct tstream_writev_queue_state);
+       int sys_errno;
+       int ret;
+
+       ret = tstream_writev_recv(subreq, &sys_errno);
+       TALLOC_FREE(subreq);
+       if (ret == -1) {
+               tevent_req_error(req, sys_errno);
+               return;
+       }
+
+       /* remember the number of bytes written for our _recv function */
+       state->ret = ret;
+       tevent_req_done(req);
+}
+
+/*
+ * Collect the result of tstream_writev_queue_send().
+ *
+ * Returns the number of bytes written on success, or -1 with *perrno
+ * set on failure. The request is destroyed either way.
+ */
+int tstream_writev_queue_recv(struct tevent_req *req, int *perrno)
+{
+       struct tstream_writev_queue_state *state = tevent_req_data(req,
+                                       struct tstream_writev_queue_state);
+       int ret = tsocket_simple_int_recv(req, perrno);
+
+       if (ret == 0) {
+               ret = state->ret;
+       }
+
+       tevent_req_received(req);
+       return ret;
+}
+
+/*
+ * Shared context for a sequence of tstream_readline requests
+ * on the same stream.
+ */
+struct tstream_readline_context {
+       /* how many bytes to request per low level readv */
+       size_t chunk_size;
+       /* bytes received beyond the end of the last returned line;
+        * consumed first by the next tstream_readline_send() */
+       uint8_t *next_buffer;
+};
+
+/*
+ * Allocate a readline context carrying the chunk size and any
+ * leftover bytes across multiple tstream_readline requests.
+ * Returns NULL on allocation failure.
+ */
+struct tstream_readline_context *tstream_readline_create_context(TALLOC_CTX *mem_ctx,
+                                                                size_t chunk_size)
+{
+       struct tstream_readline_context *ctx = NULL;
+
+       ctx = talloc(mem_ctx, struct tstream_readline_context);
+       if (ctx == NULL) {
+               return NULL;
+       }
+       ctx->chunk_size = chunk_size;
+       ctx->next_buffer = NULL;
+
+       return ctx;
+}
+
+/* async state for tstream_readline_send()/_recv() */
+struct tstream_readline_state {
+       /* these members are owned by the caller */
+       struct {
+               struct tevent_context *ev;
+               struct tstream_context *stream;
+               struct tstream_readline_context *ctx;
+               size_t max_len;
+       } caller;
+       /* number of bytes scanned so far, including the terminator */
+       size_t checked_ofs;
+       /* buffer holding the (partial) line read so far */
+       uint8_t *current_buffer;
+       /* iovec handed to the low level tstream_readv_send() */
+       struct iovec next_iov;
+};
+
+static void tstream_readline_check(struct tevent_req *subreq);
+
+/**
+ * @brief Read a single line from a stream
+ * @param[in] mem_ctx  The memory context for the result
+ * @param[in] ev       The event context the operation should work on
+ * @param[in] stream   The stream to read from
+ * @param[in] ctx      The readline context (chunk size, leftover bytes)
+ * @param[in] max_len  Fail with EMSGSIZE when the line gets longer
+ * @retval             The async request handle
+ *
+ * Bytes beyond the last line terminator that were already read by a
+ * previous request on the same ctx are consumed first.
+ */
+struct tevent_req *tstream_readline_send(TALLOC_CTX *mem_ctx,
+                                        struct tevent_context *ev,
+                                        struct tstream_context *stream,
+                                        struct tstream_readline_context *ctx,
+                                        size_t max_len)
+{
+       struct tevent_req *req;
+       struct tstream_readline_state *state;
+
+       req = tevent_req_create(mem_ctx, &state,
+                               struct tstream_readline_state);
+       if (!req) {
+               return NULL;
+       }
+
+       state->caller.ev        = ev;
+       state->caller.stream    = stream;
+       state->caller.ctx       = ctx;
+       state->caller.max_len   = max_len;
+       state->checked_ofs      = 0;
+       state->current_buffer   = NULL;
+
+       if (ctx->next_buffer) {
+               /* take ownership of the leftover bytes of the last request */
+               state->current_buffer = talloc_move(state,
+                                                   &ctx->next_buffer);
+       }
+
+       /* scan what we already have; this may finish the request directly */
+       tstream_readline_check(req);
+       if (!tevent_req_is_in_progress(req)) {
+               return tevent_req_post(req, ev);
+       }
+
+       return req;
+}
+
+static void tstream_readline_handler(struct tevent_req *subreq);
+
+/*
+ * Scan the bytes read so far for a line terminator ('\n' or '\0').
+ *
+ * If one is found, any bytes after it are stashed in ctx->next_buffer
+ * for the next request, current_buffer is truncated to the line
+ * (including its terminator) and the request is finished. Otherwise
+ * up to ctx->chunk_size more bytes are requested from the stream.
+ */
+static void tstream_readline_check(struct tevent_req *req)
+{
+       struct tstream_readline_state *state = tevent_req_data(req,
+                                       struct tstream_readline_state);
+       struct tstream_readline_context *ctx = state->caller.ctx;
+       size_t len = talloc_get_size(state->current_buffer);
+       struct tevent_req *subreq;
+       size_t remaining;
+       ssize_t pending;
+       size_t nread;
+       size_t i;
+       bool end = false;
+
+       for (i = state->checked_ofs; i < len; i++) {
+               state->checked_ofs++;
+
+               if (state->current_buffer[i] == '\n') {
+                       end = true;
+                       break;
+               }
+               if (state->current_buffer[i] == '\0') {
+                       end = true;
+                       break;
+               }
+
+               if (state->checked_ofs > state->caller.max_len) {
+                       tevent_req_error(req, EMSGSIZE);
+                       return;
+               }
+       }
+
+       if (!end) {
+               goto more;
+       }
+
+       remaining = len - state->checked_ofs;
+
+       if (remaining > 0) {
+               /* keep the bytes after the line end for the next request */
+               ctx->next_buffer = talloc_array(ctx, uint8_t,
+                                               remaining);
+               if (tevent_req_nomem(ctx->next_buffer, req)) {
+                       return;
+               }
+
+               memcpy(ctx->next_buffer,
+                      state->current_buffer + state->checked_ofs,
+                      remaining);
+
+               /*
+                * truncate the buffer to the line itself; keep the
+                * result, as realloc may move the allocation
+                */
+               state->current_buffer = talloc_realloc(state,
+                                                      state->current_buffer,
+                                                      uint8_t,
+                                                      state->checked_ofs);
+               if (tevent_req_nomem(state->current_buffer, req)) {
+                       return;
+               }
+       }
+
+       tevent_req_done(req);
+       return;
+
+more:
+       /* read less than a full chunk if only a few bytes are pending */
+       pending = tstream_pending_bytes(state->caller.stream);
+       if (pending == -1) {
+               tevent_req_error(req, errno);
+               return;
+       }
+
+       if ((pending > 0) && ((size_t)pending < ctx->chunk_size)) {
+               nread = pending;
+       } else {
+               nread = ctx->chunk_size;
+       }
+
+       state->current_buffer = talloc_realloc(state,
+                                              state->current_buffer,
+                                              uint8_t,
+                                              state->checked_ofs + nread);
+       if (tevent_req_nomem(state->current_buffer, req)) {
+               return;
+       }
+
+       state->next_iov.iov_base = state->current_buffer + state->checked_ofs;
+       state->next_iov.iov_len = nread;
+
+       subreq = tstream_readv_send(state,
+                                   state->caller.ev,
+                                   state->caller.stream,
+                                   &state->next_iov, 1);
+       if (tevent_req_nomem(subreq, req)) {
+               return;
+       }
+       tevent_req_set_callback(subreq, tstream_readline_handler, req);
+}
+
+/*
+ * More bytes arrived from the stream: collect the readv result and
+ * rescan the buffer for the end of the line.
+ */
+static void tstream_readline_handler(struct tevent_req *subreq)
+{
+       struct tevent_req *req = tevent_req_callback_data(subreq,
+                                struct tevent_req);
+       int ret;
+       int sys_errno;
+
+       ret = tstream_readv_recv(subreq, &sys_errno);
+       TALLOC_FREE(subreq);
+       if (ret == -1) {
+               tevent_req_error(req, sys_errno);
+               return;
+       }
+
+       tstream_readline_check(req);
+}
+
+/*
+ * Collect the result of tstream_readline_send().
+ *
+ * On success *line is moved onto mem_ctx and the line length
+ * (including the terminator) is returned; on failure -1 is returned
+ * with *perrno set. The request is destroyed either way.
+ *
+ * NOTE(review): the returned buffer holds exactly that many bytes and
+ * is not guaranteed to be '\0'-terminated unless the line ended with
+ * '\0' — confirm callers do not treat it as a C string.
+ */
+ssize_t tstream_readline_recv(struct tevent_req *req,
+                             int *perrno,
+                             TALLOC_CTX *mem_ctx,
+                             const char **line)
+{
+       struct tstream_readline_state *state = tevent_req_data(req,
+                               struct tstream_readline_state);
+       ssize_t ret = tsocket_simple_int_recv(req, perrno);
+
+       if (ret == 0) {
+               uint8_t *buf = talloc_move(mem_ctx, &state->current_buffer);
+
+               *line = (char *)buf;
+               ret = state->checked_ofs;
+       }
+
+       tevent_req_received(req);
+       return ret;
+}
+