torture3: Test lock upgrade/downgrade
author Volker Lendecke <vl@samba.org>
Fri, 19 May 2017 15:02:08 +0000 (17:02 +0200)
committer Volker Lendecke <vl@samba.org>
Thu, 15 Jun 2017 11:19:14 +0000 (13:19 +0200)
Signed-off-by: Volker Lendecke <vl@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org>
source3/selftest/tests.py
source3/torture/proto.h
source3/torture/test_g_lock.c
source3/torture/torture.c

index 7ad75b717b90ae9f171bf3aff474c2111f7d9365..07af4951a75b6407558b5676985bf650ef0bf156 100755 (executable)
@@ -150,6 +150,7 @@ local_tests = [
     "LOCAL-DBWRAP-WATCH2",
     "LOCAL-G-LOCK1",
     "LOCAL-G-LOCK2",
+    "LOCAL-G-LOCK3",
     "LOCAL-hex_encode_buf",
     "LOCAL-remove_duplicate_addrs2"]
 
index df2a241665e05fc3ea8af9fd8077f4b9401c2acd..046e9f9b4ede0e8bcbf86ee7ba3e42fed3a017f3 100644 (file)
@@ -126,5 +126,6 @@ bool run_oplock_cancel(int dummy);
 bool run_pthreadpool_tevent(int dummy);
 bool run_g_lock1(int dummy);
 bool run_g_lock2(int dummy);
+bool run_g_lock3(int dummy);
 
 #endif /* __TORTURE_H__ */
index ac45174a438be8c3afb071a6eb99741b8d5b20d7..154b1c168c2412c08709462c066f03b3801243e2 100644 (file)
@@ -22,6 +22,7 @@
 #include "system/filesys.h"
 #include "g_lock.h"
 #include "messages.h"
+#include "lib/util/server_id.h"
 
 static bool get_g_lock_ctx(TALLOC_CTX *mem_ctx,
                           struct tevent_context **ev,
@@ -191,6 +192,125 @@ bool run_g_lock2(int dummy)
                goto fail;
        }
 
+       ret = true;
+fail:
+       TALLOC_FREE(ctx);
+       TALLOC_FREE(msg);
+       TALLOC_FREE(ev);
+       return ret;
+}
+
+struct lock3_parser_state {
+       struct server_id self;
+       enum g_lock_type lock_type;
+       bool ok;
+};
+
+static void lock3_parser(const struct g_lock_rec *locks,
+                        size_t num_locks,
+                        const uint8_t *data,
+                        size_t datalen,
+                        void *private_data)
+{
+       struct lock3_parser_state *state = private_data;
+
+       if (datalen != 0) {
+               fprintf(stderr, "datalen=%zu\n", datalen);
+               return;
+       }
+       if (num_locks != 1) {
+               fprintf(stderr, "num_locks=%zu\n", num_locks);
+               return;
+       }
+       if (locks[0].lock_type != state->lock_type) {
+               fprintf(stderr, "found type %d, expected %d\n",
+                       (int)locks[0].lock_type, (int)state->lock_type);
+               return;
+       }
+       if (!server_id_equal(&locks[0].pid, &state->self)) {
+               struct server_id_buf tmp1, tmp2;
+               fprintf(stderr, "found pid %s, expected %s\n",
+                       server_id_str_buf(locks[0].pid, &tmp1),
+                       server_id_str_buf(state->self, &tmp2));
+               return;
+       }
+
+       state->ok = true;
+}
+
+/*
+ * Test lock upgrade/downgrade
+ */
+
+bool run_g_lock3(int dummy)
+{
+       struct tevent_context *ev = NULL;
+       struct messaging_context *msg = NULL;
+       struct g_lock_ctx *ctx = NULL;
+       const char *lockname = "lock3";
+       struct lock3_parser_state state;
+       NTSTATUS status;
+       bool ret = false;
+       bool ok;
+
+       ok = get_g_lock_ctx(talloc_tos(), &ev, &msg, &ctx);
+       if (!ok) {
+               goto fail;
+       }
+
+       state.self = messaging_server_id(msg);
+
+       status = g_lock_lock(ctx, lockname, G_LOCK_READ,
+                            (struct timeval) { .tv_sec = 1 });
+       if (!NT_STATUS_IS_OK(status)) {
+               fprintf(stderr, "g_lock_lock returned %s\n",
+                       nt_errstr(status));
+               goto fail;
+       }
+
+       status = g_lock_lock(ctx, lockname, G_LOCK_READ,
+                            (struct timeval) { .tv_sec = 1 });
+       if (!NT_STATUS_EQUAL(status, NT_STATUS_WAS_LOCKED)) {
+               fprintf(stderr, "g_lock_lock returned %s, expected %s\n",
+                       nt_errstr(status), nt_errstr(NT_STATUS_WAS_LOCKED));
+               goto fail;
+       }
+
+       state.lock_type = G_LOCK_READ;
+       state.ok = false;
+
+       status = g_lock_dump(ctx, lockname, lock3_parser, &state);
+       if (!NT_STATUS_EQUAL(status, NT_STATUS_OK)) {
+               fprintf(stderr, "g_lock_dump returned %s\n",
+                       nt_errstr(status));
+               goto fail;
+       }
+       if (!state.ok) {
+               goto fail;
+       }
+
+       status = g_lock_lock(ctx, lockname, G_LOCK_WRITE,
+                            (struct timeval) { .tv_sec = 1 });
+       if (!NT_STATUS_IS_OK(status)) {
+               fprintf(stderr, "g_lock_lock returned %s\n",
+                       nt_errstr(status));
+               goto fail;
+       }
+
+       state.lock_type = G_LOCK_WRITE;
+       state.ok = false;
+
+       status = g_lock_dump(ctx, lockname, lock3_parser, &state);
+       if (!NT_STATUS_EQUAL(status, NT_STATUS_OK)) {
+               fprintf(stderr, "g_lock_dump returned %s\n",
+                       nt_errstr(status));
+               goto fail;
+       }
+       if (!state.ok) {
+               goto fail;
+       }
+
+
        ret = true;
 fail:
        TALLOC_FREE(ctx);
index 2bdb079f83ae907a3cd501d855736f22d8a9a90e..4661da95d78d52a3221bbc1336354da86a98c3fe 100644 (file)
@@ -11479,6 +11479,7 @@ static struct {
        { "LOCAL-PTHREADPOOL-TEVENT", run_pthreadpool_tevent, 0 },
        { "LOCAL-G-LOCK1", run_g_lock1, 0 },
        { "LOCAL-G-LOCK2", run_g_lock2, 0 },
+       { "LOCAL-G-LOCK3", run_g_lock3, 0 },
        { "LOCAL-CANONICALIZE-PATH", run_local_canonicalize_path, 0 },
        { "qpathinfo-bufsize", run_qpathinfo_bufsize, 0 },
        {NULL, NULL, 0}};
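
For readers new to the g_lock dump idiom used by run_g_lock3 above: g_lock_dump() hands the lock record to a parser callback together with an opaque private_data pointer, and the test threads its expectations and results through a small state struct (lock3_parser_state). The following is a minimal sketch of that same pattern, not part of the patch; the helper names (count_state, count_parser, count_lock_holders) are hypothetical, and the g_lock_dump()/g_lock_rec declarations are assumed to match the "g_lock.h" usage shown in the diff.

	/* Sketch only: illustrates the parser/private_data idiom from the patch.
	 * Assumes the g_lock_dump() and parser signatures used in test_g_lock.c. */
	struct count_state {
		size_t num_locks;	/* filled in by the parser callback */
	};

	static void count_parser(const struct g_lock_rec *locks,
				 size_t num_locks,
				 const uint8_t *data,
				 size_t datalen,
				 void *private_data)
	{
		struct count_state *state = private_data;

		/* Record how many holders the lock record currently has. */
		state->num_locks = num_locks;
	}

	/* Caller side: dump the record, then read the result back out of
	 * the state struct the callback filled in. */
	static bool count_lock_holders(struct g_lock_ctx *ctx,
				       const char *name,
				       size_t *pnum)
	{
		struct count_state state = { .num_locks = 0 };
		NTSTATUS status;

		status = g_lock_dump(ctx, name, count_parser, &state);
		if (!NT_STATUS_IS_OK(status)) {
			return false;
		}
		*pnum = state.num_locks;
		return true;
	}

The same shape appears in the test itself: set up the state struct with the expected lock type and owner, call g_lock_dump(), and only treat the step as passed if the parser flipped state.ok to true.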