s3: smbd: Change aio_pending_size static variable to a new "aio max threads" smb...
authorJeremy Allison <jra@samba.org>
Thu, 12 Nov 2015 21:23:30 +0000 (13:23 -0800)
committerJeremy Allison <jra@samba.org>
Fri, 13 Nov 2015 20:36:19 +0000 (21:36 +0100)
Removes the accessor functions, as this parameter is now set
under user control in smb.conf. The default is 100.

Note that this doesn't limit the number of outstanding
aio requests, it just causes them to go onto the
pthreadpool queue.

Now we need to prioritize pthreadpool pipe replies
ahead of incoming SMB2 requests, but that's a patch
for another day.

Based on ideas from Volker.

Signed-off-by: Jeremy Allison <jra@samba.org>
Reviewed-by: Volker Lendecke <vl@samba.org>
Reviewed-by: Christof Schmitt <cs@samba.org>
Reviewed-by: Michael Adam <obnox@samba.org>
docs-xml/smbdotconf/tuning/aiomaxthreads.xml [new file with mode: 0644]
lib/param/loadparm.c
source3/modules/vfs_aio_fork.c
source3/modules/vfs_aio_linux.c
source3/modules/vfs_aio_pthread.c
source3/modules/vfs_default.c
source3/param/loadparm.c
source3/smbd/aio.c
source3/smbd/proto.h

diff --git a/docs-xml/smbdotconf/tuning/aiomaxthreads.xml b/docs-xml/smbdotconf/tuning/aiomaxthreads.xml
new file mode 100644 (file)
index 0000000..3afe989
--- /dev/null
@@ -0,0 +1,19 @@
+<samba:parameter name="aio max threads"
+                 type="integer"
+                 context="G"
+                 xmlns:samba="http://www.samba.org/samba/DTD/samba-doc">
+<description>
+  <para>
+    The integer parameter specifies the maximum number of
+    threads each smbd process will create when doing parallel asynchronous IO
+    calls. If the number of outstanding calls is greater than this
+    number the requests will not be refused but go onto a queue
+    and will be scheduled in turn as outstanding requests complete.
+  </para>
+
+  <related>aio read size</related>
+  <related>aio write size</related>
+</description>
+
+<value type="default">100</value>
+</samba:parameter>
index 871d2d90ebc6b123d07f609a03f97fb680545524..640c60230e7629bfe852a7964fe6b48c37fff38d 100644 (file)
@@ -2809,6 +2809,8 @@ struct loadparm_context *loadparm_init(TALLOC_CTX *mem_ctx)
 
        lpcfg_do_global_parameter(lp_ctx, "printjob username", "%U");
 
+       lpcfg_do_global_parameter(lp_ctx, "aio max threads", "100");
+
        /* Allow modules to adjust defaults */
        for (defaults_hook = defaults_hooks; defaults_hook;
                 defaults_hook = defaults_hook->next) {
index 25a72c62f5ff9418b0e6cf3c6dd9f7f98d2e5bff..472ef0cdad19031085250cb6f10289c9b3a1c6df 100644 (file)
@@ -899,17 +899,6 @@ static int aio_fork_connect(vfs_handle_struct *handle, const char *service,
                                NULL, struct aio_fork_config,
                                return -1);
 
-       /*********************************************************************
-        * How many threads to initialize ?
-        * 100 per process seems insane as a default until you realize that
-        * (a) Threads terminate after 1 second when idle.
-        * (b) Throttling is done in SMB2 via the crediting algorithm.
-        * (c) SMB1 clients are limited to max_mux (50) outstanding
-        *     requests and Windows clients don't use this anyway.
-        * Essentially we want this to be unlimited unless smb.conf
-        * says different.
-        *********************************************************************/
-       set_aio_pending_size(100);
        return 0;
 }
 
index 599272e386125d0999c5898a14fff960380766db..4f6230a2fdf103693aadd81aecaa2775d73f36c3 100644 (file)
@@ -113,12 +113,12 @@ static bool init_aio_linux(struct vfs_handle_struct *handle)
                goto fail;
        }
 
-       if (io_queue_init(get_aio_pending_size(), &io_ctx)) {
+       if (io_queue_init(lp_aio_max_threads(), &io_ctx)) {
                goto fail;
        }
 
        DEBUG(10,("init_aio_linux: initialized with up to %d events\n",
-                 get_aio_pending_size()));
+                 (int)lp_aio_max_threads()));
 
        return true;
 
@@ -321,25 +321,7 @@ static int aio_linux_int_recv(struct tevent_req *req, int *err)
        return aio_linux_recv(req, err);
 }
 
-static int aio_linux_connect(vfs_handle_struct *handle, const char *service,
-                              const char *user)
-{
-       /*********************************************************************
-        * How many io_events to initialize ?
-        * 128 per process seems insane as a default until you realize that
-        * (a) Throttling is done in SMB2 via the crediting algorithm.
-        * (b) SMB1 clients are limited to max_mux (50) outstanding
-        *     requests and Windows clients don't use this anyway.
-        * Essentially we want this to be unlimited unless smb.conf
-        * says different.
-        *********************************************************************/
-       set_aio_pending_size(lp_parm_int(
-               SNUM(handle->conn), "aio_linux", "aio num events", 128));
-       return SMB_VFS_NEXT_CONNECT(handle, service, user);
-}
-
 static struct vfs_fn_pointers vfs_aio_linux_fns = {
-       .connect_fn = aio_linux_connect,
        .pread_send_fn = aio_linux_pread_send,
        .pread_recv_fn = aio_linux_recv,
        .pwrite_send_fn = aio_linux_pwrite_send,
index 72c812f14e0140a29659c9a2226516b113c266e8..10a3a23100b33626222f7d00cf93c527fe4def78 100644 (file)
@@ -51,7 +51,7 @@ static bool init_aio_threadpool(struct tevent_context *ev_ctx,
                return true;
        }
 
-       ret = pthreadpool_init(get_aio_pending_size(), pp_pool);
+       ret = pthreadpool_init(lp_aio_max_threads(), pp_pool);
        if (ret) {
                errno = ret;
                return false;
@@ -69,7 +69,7 @@ static bool init_aio_threadpool(struct tevent_context *ev_ctx,
        }
 
        DEBUG(10,("init_aio_threadpool: initialized with up to %d threads\n",
-                 get_aio_pending_size()));
+                 (int)lp_aio_max_threads()));
 
        return true;
 }
index f3ebb897580b2d7d7ce5e4296b425521109c840a..819a1a1ca6fce4dc2d336d4851d7b3f7d8a05f7e 100644 (file)
@@ -716,7 +716,7 @@ static bool vfswrap_init_asys_ctx(struct smbd_server_connection *conn)
                return true;
        }
 
-       ret = asys_context_init(&ctx, get_aio_pending_size());
+       ret = asys_context_init(&ctx, lp_aio_max_threads());
        if (ret != 0) {
                DEBUG(1, ("asys_context_init failed: %s\n", strerror(ret)));
                return false;
index b24d198a1d926cfb4cf95b74f49ba537cdab5b3a..9f40e65f33f83bae55d15f6b5054de15ed4f51bb 100644 (file)
@@ -917,6 +917,8 @@ static void init_globals(struct loadparm_context *lp_ctx, bool reinit_globals)
 
        Globals.web_port = 901;
 
+       Globals.aio_max_threads = 100;
+
        /* Now put back the settings that were set with lp_set_cmdline() */
        apply_lp_set_cmdline();
 }
index 3923eacdf64d9b9a644a9724f906dd4b65049846..32a1ce0e4a0bc74b5bf230419890cd3656de75b7 100644 (file)
  Statics plus accessor functions.
 *****************************************************************************/
 
-static int aio_pending_size = 100; /* Current max threads. */
 static int outstanding_aio_calls;
 
-int get_aio_pending_size(void)
-{
-       return aio_pending_size;
-}
-
-void set_aio_pending_size(int newsize)
-{
-       aio_pending_size = newsize;
-}
-
 int get_outstanding_aio_calls(void)
 {
        return outstanding_aio_calls;
index 95414e638b092b3c2193ee1f2d6794de61a389e2..7926dd6d16d6c3f7d910c7123ac0e716705a0dbc 100644 (file)
@@ -66,8 +66,6 @@ void srv_set_signing(struct smbXsrv_connection *conn,
 
 /* The following definitions come from smbd/aio.c  */
 
-int get_aio_pending_size(void);
-void set_aio_pending_size(int newsize);
 int get_outstanding_aio_calls(void);
 void increment_outstanding_aio_calls(void);
 void decrement_outstanding_aio_calls(void);