struct {
struct tstream_context *stream;
+ dcerpc_connection_use_trans_fn use_trans_fn;
struct tevent_queue *write_queue;
} transport;
struct {
uint16_t max_xmit_frag;
uint16_t max_recv_frag;
- bool concurent_multiplex;
+ bool concurrent_multiplex;
bool bind_done;
} features;
struct tevent_queue *out_queue;
struct dcerpc_call *list;
struct dcerpc_call *active;
+ struct dcerpc_call *new_call;
} calls;
struct {
enum dcerpc_AuthType auth_type;
enum dcerpc_AuthLevel auth_level;
struct gensec_security *gensec;
+ bool client_hdr_signing;
+ bool hdr_signing;
+ bool verified_bitmask1;
};
struct dcerpc_presentation {
struct dcerpc_ctx_list req;
struct dcerpc_ack_ctx ack;
} negotiate;
+ bool verified_pcontext;
};
struct dcerpc_call {
return NULL;
}
+ conn->features.max_xmit_frag = 4280;
+ conn->features.max_recv_frag = 4280;
+
conn->calls.out_queue = tevent_queue_create(conn, "out_queue");
if (conn->calls.out_queue == NULL) {
talloc_free(conn);
return conn;
}
+/*
+ * Register an optional transport hook used to switch the underlying
+ * stream into a "trans" round-trip mode (set up elsewhere for
+ * ncacn_np-style transports). A NULL fn leaves the optimization
+ * disabled; callers of the fragment writers check this before use.
+ */
+void dcerpc_connection_set_use_trans_fn(struct dcerpc_connection *conn,
+					dcerpc_connection_use_trans_fn fn)
+{
+	conn->transport.use_trans_fn = fn;
+}
+
struct dcerpc_security *dcerpc_security_allocate(
TALLOC_CTX *mem_ctx,
struct dcerpc_connection *conn,
return pres;
}
+/*
+ * Duplicate a presentation context for testing, optionally overriding
+ * the context id, interface table and/or transfer syntax.
+ *
+ * The negotiate request is rebuilt from the (possibly overridden)
+ * values; the negotiate ack is preset to a provider rejection so an
+ * unanswered context reads as "not accepted" until a real bind ack
+ * overwrites it. Returns a talloc child of mem_ctx, or NULL on OOM.
+ */
+struct dcerpc_presentation *dcerpc_presentation_test_copy(
+	TALLOC_CTX *mem_ctx,
+	const struct dcerpc_presentation *pres1,
+	const uint32_t *context_id,
+	const struct ndr_interface_table *table,
+	const struct ndr_syntax_id *transfer)
+{
+	struct dcerpc_presentation *copy;
+
+	copy = talloc_zero(mem_ctx, struct dcerpc_presentation);
+	if (copy == NULL) {
+		return NULL;
+	}
+
+	/* start from an exact clone of pres1 ... */
+	*copy = *pres1;
+
+	/* ... then apply whichever overrides were requested */
+	if (table != NULL) {
+		copy->table = table;
+	}
+	if (transfer != NULL) {
+		copy->transfer = *transfer;
+	}
+	if (context_id != NULL) {
+		copy->context_id = *context_id;
+	}
+
+	/* re-derive the bind/alter request from the final values */
+	copy->negotiate.req.abstract_syntax = copy->table->syntax_id;
+	copy->negotiate.req.context_id = copy->context_id;
+	copy->negotiate.req.num_transfer_syntaxes = 1;
+	copy->negotiate.req.transfer_syntaxes = &copy->transfer;
+
+	/* pessimistic default until the server answers */
+	copy->negotiate.ack.result = DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION;
+	copy->negotiate.ack.reason.value =
+		DCERPC_BIND_ACK_REASON_ABSTRACT_SYNTAX_NOT_SUPPORTED;
+
+	return copy;
+}
+
struct dcerpc_call *dcerpc_call_allocate(TALLOC_CTX *mem_ctx,
struct dcerpc_association *assoc,
struct dcerpc_security *sec,
return NT_STATUS_OK;
}
+/*
+ * Push a ncacn_packet (optionally followed by a dcerpc_auth trailer)
+ * into a wire blob and patch the frag_length field in afterwards.
+ *
+ * The packet's drep byte selects NDR endianness and
+ * DCERPC_PFC_FLAG_OBJECT_UUID selects the extended (object) header
+ * marshalling.
+ *
+ * pkt->auth_length is always derived here: from the auth credentials
+ * when auth_info is given, otherwise forced to 0 so that a stale
+ * value left in pkt by the caller can never reach the wire.
+ */
+static NTSTATUS dcerpc_ncacn_push_auth(DATA_BLOB *blob,
+				       TALLOC_CTX *mem_ctx,
+				       struct ncacn_packet *pkt,
+				       struct dcerpc_auth *auth_info)
+{
+	struct ndr_push *ndr;
+	enum ndr_err_code ndr_err;
+
+	ndr = ndr_push_init_ctx(mem_ctx);
+	if (!ndr) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	if (!(pkt->drep[0] & DCERPC_DREP_LE)) {
+		ndr->flags |= LIBNDR_FLAG_BIGENDIAN;
+	}
+
+	if (pkt->pfc_flags & DCERPC_PFC_FLAG_OBJECT_UUID) {
+		ndr->flags |= LIBNDR_FLAG_OBJECT_PRESENT;
+	}
+
+	if (auth_info) {
+		pkt->auth_length = auth_info->credentials.length;
+	} else {
+		pkt->auth_length = 0;
+	}
+
+	ndr_err = ndr_push_ncacn_packet(ndr, NDR_SCALARS|NDR_BUFFERS, pkt);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		return ndr_map_error2ntstatus(ndr_err);
+	}
+
+	if (auth_info) {
+#if 0
+		/* the s3 rpc server doesn't handle auth padding in
+		   bind requests. Use zero auth padding to keep us
+		   working with old servers */
+		uint32_t offset = ndr->offset;
+		ndr_err = ndr_push_align(ndr, 16);
+		if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+			return ndr_map_error2ntstatus(ndr_err);
+		}
+		auth_info->auth_pad_length = ndr->offset - offset;
+#else
+		auth_info->auth_pad_length = 0;
+#endif
+		ndr_err = ndr_push_dcerpc_auth(ndr, NDR_SCALARS|NDR_BUFFERS, auth_info);
+		if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+			return ndr_map_error2ntstatus(ndr_err);
+		}
+	}
+
+	*blob = ndr_push_blob(ndr);
+
+	/* fill in the frag length */
+	dcerpc_set_frag_length(blob, blob->length);
+
+	return NT_STATUS_OK;
+}
+
static NTSTATUS dcerpc_ncacn_packet_blob(TALLOC_CTX *mem_ctx,
enum dcerpc_pkt_type ptype,
uint8_t pfc_flags,
DATA_BLOB *blob)
{
struct ncacn_packet r;
- enum ndr_err_code ndr_err;
+ NTSTATUS status;
r.rpc_vers = 5;
r.rpc_vers_minor = 0;
r.call_id = call_id;
r.u = *u;
- ndr_err = ndr_push_struct_blob(blob, mem_ctx, &r,
- (ndr_push_flags_fn_t)ndr_push_ncacn_packet);
- if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
- return ndr_map_error2ntstatus(ndr_err);
+ status = dcerpc_ncacn_push_auth(blob, mem_ctx, &r, NULL);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
}
- dcerpc_set_frag_length(blob, blob->length);
-
-
if (DEBUGLEVEL >= 10) {
/* set frag len for print function */
r.frag_length = blob->length;
return status;
}
+ if (auth_info.auth_type != sec->auth_type) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+
+ if (auth_info.auth_level != sec->auth_level) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+
+ if (auth_info.auth_context_id != sec->context_id) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+
data = data_blob_const(raw_pkt->data + header_size,
pkt_trailer->length - auth_length);
full_pkt = data_blob_const(raw_pkt->data,
full_pkt.data,
full_pkt.length,
&auth_info.credentials);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
break;
case DCERPC_AUTH_LEVEL_INTEGRITY:
full_pkt.data,
full_pkt.length,
&auth_info.credentials);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
break;
default:
return NT_STATUS_INVALID_PARAMETER;
static NTSTATUS dcerpc_connection_loop_restart(struct dcerpc_connection *conn,
struct tevent_context *ev)
{
+ if (ev == NULL) {
+ return NT_STATUS_INVALID_PARAMETER; // TODO...
+ }
+
if (conn->loop.subreq) {
if (conn->loop.ev != ev) {
return NT_STATUS_INVALID_PARAMETER; // TODO...
return NT_STATUS_NO_MEMORY;
}
tevent_req_set_callback(conn->loop.subreq, dcerpc_connection_loop, conn);
+ conn->loop.ev = ev;
return NT_STATUS_OK;
}
DATA_BLOB pdu;
struct dcerpc_call *call;
bool valid_type = false;
+ bool allow_fragments = false;
conn->loop.subreq = NULL;
return;
}
+ if (DEBUGLEVEL >= 10) {
+ NDR_PRINT_DEBUG(ncacn_packet, pkt);
+ }
+
switch (pkt->ptype) {
case DCERPC_PKT_REQUEST:
/* Ordinary request. */
valid_type = true;
+ allow_fragments = true;
break;
case DCERPC_PKT_PING:
case DCERPC_PKT_RESPONSE:
/* Ordinary reply. */
valid_type = true;
+ allow_fragments = true;
break;
case DCERPC_PKT_FAULT:
return;
}
+ if (!(pkt->pfc_flags & DCERPC_PFC_FLAG_LAST)) {
+ if (!allow_fragments) {
+ TALLOC_FREE(subreq);
+ // disconnect and notify pending calls NT_STATUS_RPC_PROTOCOL_ERROR;
+ return;
+ }
+ }
+
+ if (conn->calls.active != NULL) {
+
+ if (pkt->call_id != conn->calls.active->call_id) {
+ TALLOC_FREE(subreq);
+ // disconnect and notify pending calls NT_STATUS_RPC_PROTOCOL_ERROR;
+ return;
+ }
+
+ if (pkt->pfc_flags & DCERPC_PFC_FLAG_FIRST) {
+ TALLOC_FREE(subreq);
+ // disconnect and notify pending calls NT_STATUS_RPC_PROTOCOL_ERROR;
+ return;
+ }
+
+ call = conn->calls.active;
+
+ if (pkt->pfc_flags & DCERPC_PFC_FLAG_LAST) {
+ conn->calls.active = NULL;
+ }
+ } else {
+ if (!(pkt->pfc_flags & DCERPC_PFC_FLAG_FIRST)) {
+ TALLOC_FREE(subreq);
+ // disconnect and notify pending calls NT_STATUS_RPC_PROTOCOL_ERROR;
+ return;
+ }
+
+ call = conn->calls.list;
+ }
+
for (call = conn->calls.list; call; call = call->next) {
if (call->call_id == pkt->call_id) {
break;
}
}
+ if (call == NULL) {
+ call = conn->calls.new_call;
+ }
+
if (call == NULL) {
TALLOC_FREE(subreq);
// disconnect and notify pending calls NT_STATUS_RPC_PROTOCOL_ERROR;
return;
}
+ if (!(pkt->pfc_flags & DCERPC_PFC_FLAG_LAST)) {
+ conn->calls.active = call;
+
+ // TODO
+ //
+ // reassemble and return
+ }
+
error = call->incoming.handler(call->incoming.private_data, pkt, pdu);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(error)) {
return;
}
+ if (conn->calls.new_call == NULL) {
+ conn->loop.ev = NULL;
+ return;
+ }
+
error = dcerpc_connection_loop_restart(conn, conn->loop.ev);
if (!NT_STATUS_IS_OK(error)) {
- TALLOC_FREE(subreq);
// disconnect and notify pending calls
return;
}
struct dcerpc_connection *conn;
struct dcerpc_call *call;
struct dcerpc_security *sec;
+ bool proposed_hdr_signing;
DATA_BLOB sec_in;
NTSTATUS sec_status;
DATA_BLOB sec_out;
};
struct dcerpc_do_bind_out_frag {
+ struct tevent_context *ev;
+ struct dcerpc_connection *conn;
struct tevent_req *req;
enum dcerpc_pkt_type ptype;
DATA_BLOB blob;
struct iovec vector;
+ struct tevent_req *subreq_wait1;
+ struct tevent_req *subreq_wait2;
};
-static void dcerpc_do_bind_out_frag_next(struct tevent_req *req,
- void *private_data);
+static void dcerpc_do_bind_cleanup(struct tevent_req *req,
+ enum tevent_req_state req_state);
+
static void dcerpc_do_bind_sec_next(struct tevent_req *subreq);
+static void dcerpc_do_bind_out_frag_next(struct tevent_req *subreq);
static NTSTATUS dcerpc_do_bind_handle_in_frag(void *private_data,
struct ncacn_packet *pkt,
{
struct tevent_req *req;
struct dcerpc_do_bind_state *state;
- bool ok;
+ struct tevent_req *subreq;
req = tevent_req_create(mem_ctx, &state,
struct dcerpc_do_bind_state);
state->conn = conn;
state->call = call;
state->sec = sec;
+ state->remaining_pres = num_pres;
state->num_pres = num_pres;
state->pres = pres;
state->call->incoming.handler = dcerpc_do_bind_handle_in_frag;
DLIST_ADD_END(state->conn->calls.list, state->call, NULL);
- talloc_set_destructor(state, NULL);//TODO clear state->out_frag->req
-
- if (state->sec != NULL && state->sec->gensec != NULL) {
- struct tevent_req *subreq;
+ tevent_req_set_cleanup_fn(req, dcerpc_do_bind_cleanup);
+ tevent_req_defer_callback(req, ev);
+ if (state->sec && state->sec->auth_type != DCERPC_AUTH_TYPE_NONE) {
subreq = gensec_update_send(state, ev,
state->sec->gensec,
state->sec_in);
return req;
}
- ok = tevent_queue_add(state->conn->calls.out_queue,
- state->ev,
- req,
- dcerpc_do_bind_out_frag_next,
- NULL);
- if (!ok) {
- tevent_req_nomem(NULL, req);
+ state->sec_status = NT_STATUS_OK;
+
+ subreq = tevent_queue_wait_send(state, state->ev,
+ state->conn->calls.out_queue);
+ if (tevent_req_nomem(subreq, req)) {
return tevent_req_post(req, ev);
}
+ tevent_req_set_callback(subreq, dcerpc_do_bind_out_frag_next, req);
return req;
}
+/*
+ * tevent_req cleanup hook for the bind request.
+ *
+ * Detaches the request from state that may outlive it: the outgoing
+ * fragment belongs to the connection and keeps running, but must no
+ * longer point back at this req; the call is unhooked from the
+ * connection's call list so no further incoming pdu is routed here.
+ */
+static void dcerpc_do_bind_cleanup(struct tevent_req *req,
+				   enum tevent_req_state req_state)
+{
+	struct dcerpc_do_bind_state *state =
+		tevent_req_data(req,
+		struct dcerpc_do_bind_state);
+
+	/* the in-flight fragment survives; just break the back-pointer */
+	if (state->out_frag != NULL) {
+		state->out_frag->req = NULL;
+		state->out_frag = NULL;
+	}
+
+	if (state->call != NULL) {
+		/* stop incoming dispatch before removing the call */
+		ZERO_STRUCT(state->call->incoming);
+		DLIST_REMOVE(state->conn->calls.list, state->call);
+		state->call = NULL;
+	}
+}
+
static void dcerpc_do_bind_sec_next(struct tevent_req *subreq)
{
struct tevent_req *req =
tevent_req_data(req,
struct dcerpc_do_bind_state);
NTSTATUS status;
- bool ok;
data_blob_free(&state->sec_out);
status = gensec_update_recv(subreq, state, &state->sec_out);
return;
}
- ok = tevent_queue_add(state->conn->calls.out_queue,
- state->ev,
- req,
- dcerpc_do_bind_out_frag_next,
- NULL);
- if (!ok) {
- tevent_req_nomem(NULL, req);
+ if (NT_STATUS_IS_OK(state->sec_status) &&
+ state->sec_out.length == 0)
+ {
+ tevent_req_done(req);
+ return;
+ }
+
+ if (state->sec->auth_level >= DCERPC_AUTH_LEVEL_INTEGRITY) {
+ state->sec->client_hdr_signing =
+ gensec_have_feature(state->sec->gensec,
+ GENSEC_FEATURE_SIGN_PKT_HEADER);
+ }
+
+ subreq = tevent_queue_wait_send(state, state->ev,
+ state->conn->calls.out_queue);
+ if (tevent_req_nomem(subreq, req)) {
return;
}
+ tevent_req_set_callback(subreq, dcerpc_do_bind_out_frag_next, req);
}
+static void dcerpc_do_bind_out_frag_trans_wait1(struct tevent_req *subreq);
static void dcerpc_do_bind_out_frag_done(struct tevent_req *subreq);
+static void dcerpc_do_bind_out_frag_trans_wait2(struct tevent_req *subreq);
-static void dcerpc_do_bind_out_frag_next(struct tevent_req *req,
- void *private_data)
+static void dcerpc_do_bind_out_frag_next(struct tevent_req *subreq)
{
+ struct tevent_req *req =
+ tevent_req_callback_data(subreq,
+ struct tevent_req);
struct dcerpc_do_bind_state *state =
tevent_req_data(req,
struct dcerpc_do_bind_state);
size_t auth_len = 0;
NTSTATUS status;
DATA_BLOB auth_info = data_blob_null;
+ uint8_t pfc_flags = DCERPC_PFC_FLAG_FIRST | DCERPC_PFC_FLAG_LAST;
union dcerpc_payload u;
- struct tevent_req *subreq;
uint32_t i;
+ bool require_ack = false;
+ bool use_trans = true;
+ bool ok;
+
+ ok = tevent_queue_wait_recv(subreq);
+ if (!ok) {
+ //status = NT_STATUS_INTERNAL_ERROR;
+ tevent_req_oom(req);
+ return;
+ }
+ TALLOC_FREE(subreq);
/*
* the fragment belongs to the connection instead of the request
if (tevent_req_nomem(frag, req)) {
return;
}
-
+ frag->ev = state->ev;
+ frag->conn = state->conn;
frag->req = req;
state->out_frag = frag;
+ if (state->sec && state->sec->auth_type != DCERPC_AUTH_TYPE_NONE) {
+ if (state->sec->client_hdr_signing &&
+ !state->proposed_hdr_signing)
+ {
+ state->proposed_hdr_signing = true;
+ pfc_flags |= DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN;
+ require_ack = true;
+ }
+ }
+
+ //TODO : DCERPC_PFC_FLAG_CONC_MPX
+
+ //TODO: remaining_pres
+
if (!state->conn->features.bind_done) {
frag->ptype = DCERPC_PKT_BIND;
+ } else if (require_ack) {
+ frag->ptype = DCERPC_PKT_ALTER;
} else if (state->remaining_pres > 0) {
frag->ptype = DCERPC_PKT_ALTER;
} else if (!NT_STATUS_IS_OK(state->sec_status)) {
frag->ptype = DCERPC_PKT_ALTER;
- } else if (state->sec_out.length > 0) {
+ } else {
frag->ptype = DCERPC_PKT_AUTH3;
}
- if (state->sec) {
+ if (state->sec && state->sec->auth_type != DCERPC_AUTH_TYPE_NONE) {
status = dcerpc_auth_blob(frag,
state->sec->auth_type,
state->sec->auth_level,
0, /* auth_pad_length */
state->sec->context_id, /* auth_context_id */
- &state->sec_in,
+ &state->sec_out,
&auth_info);
if (!NT_STATUS_IS_OK(status)) {
tevent_req_nterror(req, status);
status = dcerpc_ncacn_packet_blob(frag,
frag->ptype,
- DCERPC_PFC_FLAG_FIRST |
- DCERPC_PFC_FLAG_LAST,
+ pfc_flags,
auth_len,
state->call->call_id,
&u,
return;
}
+ if (frag->ptype == DCERPC_PKT_AUTH3) {
+ use_trans = false;
+ }
+
+ if (frag->conn->transport.use_trans_fn == NULL) {
+ use_trans = false;
+ }
+
+ if (frag->conn->loop.subreq != NULL) {
+ use_trans = false;
+ }
+
+ if (frag->conn->features.concurrent_multiplex) {
+ use_trans = false;
+ }
+
+ if (tevent_queue_length(frag->conn->calls.out_queue) > 1) {
+ use_trans = false;
+ }
+
+ if (use_trans) {
+ frag->subreq_wait1 = tevent_queue_wait_send(frag,
+ frag->ev,
+ frag->conn->transport.write_queue);
+ if (tevent_req_nomem(req, frag->subreq_wait1)) {
+ return;
+ }
+ tevent_req_set_callback(frag->subreq_wait1,
+ dcerpc_do_bind_out_frag_trans_wait1,
+ frag);
+ /*
+ * we need to block reads until our write is
+ * the next in the write queue.
+ */
+ frag->conn->loop.subreq = frag->subreq_wait1;
+ frag->conn->loop.ev = frag->ev;
+ }
+
/*
- * TODO: add smb_trans handling for ncacn_np
- *
* We need to add a dcerpc_write_fragment_queue_send/recv()
*/
frag->vector.iov_base = frag->blob.data;
frag->vector.iov_len = frag->blob.length;
- subreq = tstream_writev_queue_send(frag, state->ev,
- state->conn->transport.stream,
- state->conn->transport.write_queue,
+ subreq = tstream_writev_queue_send(frag, frag->ev,
+ frag->conn->transport.stream,
+ frag->conn->transport.write_queue,
&frag->vector, 1);
if (tevent_req_nomem(subreq, req)) {
return;
dcerpc_do_bind_out_frag_done,
frag);
- status = dcerpc_connection_loop_restart(state->conn, state->ev);
+ if (use_trans) {
+ frag->subreq_wait2 = tevent_queue_wait_send(frag,
+ frag->ev,
+ frag->conn->transport.write_queue);
+ if (tevent_req_nomem(req, frag->subreq_wait2)) {
+ return;
+ }
+ tevent_req_set_callback(frag->subreq_wait2,
+ dcerpc_do_bind_out_frag_trans_wait2,
+ frag);
+ }
+
+ if (frag->ptype == DCERPC_PKT_AUTH3) {
+ return;
+ }
+
+ status = dcerpc_connection_loop_restart(frag->conn, frag->ev);
if (tevent_req_nterror(req, status)) {
return;
}
}
+/*
+ * wait1 stage of the trans optimization for the bind fragment: fires
+ * when our entry reaches the head of the transport write queue.
+ *
+ * If the queue holds only our three entries (wait1, writev, wait2) we
+ * switch the stream into trans mode and restart the read loop here;
+ * otherwise more writes are pending, we skip the trans call and let
+ * the wait2 stage restart the read instead.
+ *
+ * NOTE: frag may outlive the originating req (req can be NULL after
+ * the caller's cleanup hook ran), so every error path checks req.
+ */
+static void dcerpc_do_bind_out_frag_trans_wait1(struct tevent_req *subreq)
+{
+	struct dcerpc_do_bind_out_frag *frag =
+		tevent_req_callback_data(subreq,
+		struct dcerpc_do_bind_out_frag);
+	struct tevent_req *req = frag->req;
+	NTSTATUS status;
+	bool ok;
+
+	/*
+	 * TODO: what if the caller has been free'ed?
+	 */
+
+	/* we no longer block the connection's read loop */
+	frag->subreq_wait1 = NULL;
+	frag->conn->loop.subreq = NULL;
+
+	ok = tevent_queue_wait_recv(subreq);
+	if (!ok) {
+		status = NT_STATUS_INTERNAL_ERROR;
+		TALLOC_FREE(frag);
+		if (req) {
+			tevent_req_nterror(req, status);
+		}
+		//dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+		return;
+	}
+
+	if (tevent_queue_length(frag->conn->transport.write_queue) > 3) {
+		/*
+		 * We added 3 entries into the queue,
+		 * wait1, writev and wait2.
+		 *
+		 * There's more to write, we should not block
+		 * further writev calls for a trans call.
+		 *
+		 * The wait2 stage will trigger the read.
+		 */
+		TALLOC_FREE(subreq);
+		return;
+	}
+
+	/*
+	 * we don't need wait2 anymore, we're sure that
+	 * we'll do a trans call.
+	 */
+	TALLOC_FREE(frag->subreq_wait2);
+
+	status = frag->conn->transport.use_trans_fn(frag->conn->transport.stream);
+	if (!NT_STATUS_IS_OK(status)) {
+		TALLOC_FREE(frag);
+		if (req) {
+			tevent_req_nterror(req, status);
+		}
+		//dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+		return;
+	}
+
+	/* we free subreq after tstream_cli_np_use_trans */
+	TALLOC_FREE(subreq);
+
+	status = dcerpc_connection_loop_restart(frag->conn, frag->ev);
+	if (!NT_STATUS_IS_OK(status)) {
+		TALLOC_FREE(frag);
+		if (req) {
+			tevent_req_nterror(req, status);
+		}
+		//dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+		return;
+	}
+}
+
static void dcerpc_do_bind_out_frag_done(struct tevent_req *subreq)
{
struct dcerpc_do_bind_out_frag *frag =
tevent_req_done(req);
return;
}
+
+ if (frag->subreq_wait2 != NULL) {
+ return;
+ }
+
TALLOC_FREE(frag);
/* we need to wait for incoming pdus */
}
-static NTSTATUS dcerpc_do_bind_handle_in_frag(void *private_data,
- struct ncacn_packet *pkt,
- DATA_BLOB frag)
+/*
+ * wait2 stage: the write queue drained behind other writers, so wait1
+ * skipped the trans call. Restart the read loop now and drop the
+ * fragment; req may already be NULL if the caller was cleaned up.
+ */
+static void dcerpc_do_bind_out_frag_trans_wait2(struct tevent_req *subreq)
{
-	struct tevent_req *req =
-		talloc_get_type_abort(private_data,
-				      struct tevent_req);
-	struct dcerpc_do_bind_state *state =
-		tevent_req_data(req,
-				struct dcerpc_do_bind_state);
+	struct dcerpc_do_bind_out_frag *frag =
+		tevent_req_callback_data(subreq,
+		struct dcerpc_do_bind_out_frag);
+	struct tevent_req *req = frag->req;
	NTSTATUS status;
+	bool ok;
+
+	frag->subreq_wait2 = NULL;
+
+	ok = tevent_queue_wait_recv(subreq);
+	if (!ok) {
+		status = NT_STATUS_INTERNAL_ERROR;
+		TALLOC_FREE(frag);
+		if (req) {
+			tevent_req_nterror(req, status);
+		}
+		//dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+		return;
+	}
+
+	TALLOC_FREE(subreq);
+
+	status = dcerpc_connection_loop_restart(frag->conn, frag->ev);
+	if (!NT_STATUS_IS_OK(status)) {
+		TALLOC_FREE(frag);
+		if (req) {
+			tevent_req_nterror(req, status);
+		}
+		//dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+		return;
+	}
-	DLIST_REMOVE(state->conn->calls.list, state->call);
+	TALLOC_FREE(frag);
+
+	/* we need to wait for incoming pdus */
+}
+
+static NTSTATUS dcerpc_do_bind_handle_in_frag(void *private_data,
+ struct ncacn_packet *pkt,
+ DATA_BLOB frag)
+{
+ struct tevent_req *req =
+ talloc_get_type_abort(private_data,
+ struct tevent_req);
+ struct dcerpc_do_bind_state *state =
+ tevent_req_data(req,
+ struct dcerpc_do_bind_state);
+ NTSTATUS status;
+ size_t i;
/* Ensure we have the correct type. */
switch (pkt->ptype) {
case DCERPC_PKT_BIND_ACK:
case DCERPC_PKT_ALTER_RESP:
+ if (!state->conn->features.bind_done) {
+ if (pkt->u.bind_ack.max_recv_frag < 1234) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+ if (pkt->u.bind_ack.max_xmit_frag < 1234) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+ state->conn->features.max_recv_frag =
+ pkt->u.bind_ack.max_recv_frag;
+ state->conn->features.max_xmit_frag =
+ pkt->u.bind_ack.max_xmit_frag;
+
+ if (pkt->pfc_flags & DCERPC_PFC_FLAG_CONC_MPX) {
+ state->conn->features.concurrent_multiplex = true;
+ }
+
+ state->conn->features.bind_done = true;
+ }
+
+ if (!state->conn->assoc->negotiate_done) {
+ state->conn->assoc->negotiate_done = true;
+ state->conn->assoc->assoc_group_id = pkt->u.bind_ack.assoc_group_id;
+ }
+
+ if (pkt->u.bind_ack.assoc_group_id != state->conn->assoc->assoc_group_id) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+
+ if (pkt->u.bind_ack.num_results > state->num_ctx) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+
+ state->remaining_pres = 0;
+
+ if (state->proposed_hdr_signing) {
+ if (pkt->pfc_flags & DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN) {
+ state->sec->hdr_signing = true;
+ }
+ }
+
+ for (i = 0; i < pkt->u.bind_ack.num_results; i++) {
+ struct dcerpc_ack_ctx *ack = &pkt->u.bind_ack.ctx_list[i];
+
+ if (i < state->num_pres) {
+ state->pres[i]->negotiate.ack = *ack;
+ continue;
+ }
+
+ if (ack->result != DCERPC_BIND_ACK_RESULT_NEGOTIATE_ACK) {
+ continue;
+ }
+
+ state->conn->assoc->features = state->conn->assoc->client_features;
+ state->conn->assoc->features &= ack->reason.negotiate;
+ }
+
+ for (i = 0; i < state->num_pres; i++) {
+ struct dcerpc_ack_ctx *ack = &state->pres[i]->negotiate.ack;
+ bool ok;
+
+ if (ack->result != DCERPC_BIND_ACK_RESULT_ACCEPTANCE) {
+ continue;
+ }
+
+ ok = ndr_syntax_id_equal(&state->pres[i]->transfer,
+ &ack->syntax);
+ if (!ok) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+ }
+
+ if (pkt->auth_length >= 8) {
+ struct tevent_req *subreq;
+
+ state->sec_in = data_blob_talloc(state,
+ pkt->u.bind_ack.auth_info.data + 8,
+ pkt->u.bind_ack.auth_info.length - 8);
+
+ subreq = gensec_update_send(state, state->ev,
+ state->sec->gensec,
+ state->sec_in);
+ if (tevent_req_nomem(subreq, req)) {
+ return NT_STATUS_OK;
+ }
+ tevent_req_set_callback(subreq, dcerpc_do_bind_sec_next, req);
+ return NT_STATUS_OK;
+ }
+
+ tevent_req_done(req);
+ return NT_STATUS_OK;
+
+ //case DCERPC_PKT_ALTER_RESP:
+ if (pkt->auth_length != 0) {
+ return NT_STATUS_NOT_IMPLEMENTED;
+ }
- state->sec_status = NT_STATUS_NOT_IMPLEMENTED;
+ return NT_STATUS_NOT_IMPLEMENTED;
//TODO
#if 0
/* Point the return values at the NDR data. */
"TODO"));
return NT_STATUS_RPC_PROTOCOL_ERROR;
}
+
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
}
NTSTATUS dcerpc_do_bind_recv(struct tevent_req *req)
const DATA_BLOB *blob;
size_t ofs;
bool bigendian;
+ DATA_BLOB trailer;
+ size_t trailer_ofs;
} request;
+ bool verify_bitmask1;
+ bool verify_pcontext;
+ bool got_first;
struct dcerpc_do_request_out_frag *out_frag;
struct {
DATA_BLOB blob;
};
struct dcerpc_do_request_out_frag {
+ struct tevent_context *ev;
+ struct dcerpc_connection *conn;
struct tevent_req *req;
DATA_BLOB blob;
bool is_last;
struct iovec vector;
+ struct tevent_req *subreq_wait1;
+ struct tevent_req *subreq_wait2;
};
+static void dcerpc_do_request_cleanup(struct tevent_req *req,
+ enum tevent_req_state req_state);
+
+static void dcerpc_do_request_verification_trailer(struct tevent_req *req);
static void dcerpc_do_request_out_frag_next(struct tevent_req *req,
void *private_data);
state->request.blob = request;
state->request.bigendian = bigendian;
+ dcerpc_do_request_verification_trailer(req);
+ if (!tevent_req_is_in_progress(req)) {
+ return tevent_req_post(req, ev);
+ }
+
state->call->incoming.private_data = req;
state->call->incoming.handler = dcerpc_do_request_handle_in_frag;
+ DLIST_ADD_END(state->conn->calls.list, state->call, NULL);
- state->out_frag = NULL;
-
- talloc_set_destructor(state, NULL);//TODO clear state->out_frag->req
+ tevent_req_set_cleanup_fn(req, dcerpc_do_request_cleanup);
+ tevent_req_defer_callback(req, ev);
ok = tevent_queue_add(state->conn->calls.out_queue,
state->ev,
return req;
}
+/*
+ * tevent_req cleanup hook for the request call.
+ *
+ * Like the bind cleanup, but additionally clears the connection's
+ * "active" call pointer when this call is the one mid-reassembly,
+ * so a later multi-fragment response cannot be matched against a
+ * dead call.
+ */
+static void dcerpc_do_request_cleanup(struct tevent_req *req,
+				      enum tevent_req_state req_state)
+{
+	struct dcerpc_do_request_state *state =
+		tevent_req_data(req,
+		struct dcerpc_do_request_state);
+
+	/* the in-flight fragment survives; just break the back-pointer */
+	if (state->out_frag != NULL) {
+		state->out_frag->req = NULL;
+		state->out_frag = NULL;
+	}
+
+	if (state->call != NULL) {
+		if (state->call == state->conn->calls.active) {
+			state->conn->calls.active = NULL;
+		}
+		/* stop incoming dispatch before removing the call */
+		ZERO_STRUCT(state->call->incoming);
+		DLIST_REMOVE(state->conn->calls.list, state->call);
+		state->call = NULL;
+	}
+}
+
+/*
+ * Build the sec_verification_trailer for this request's stub data.
+ *
+ * Only done for DCERPC_AUTH_LEVEL_INTEGRITY and above. Up to three
+ * commands are added, each skipped once its property has been
+ * verified by an earlier response:
+ *  - BITMASK1: advertises client header-signing support
+ *  - PCONTEXT: binds abstract + transfer syntax of the presentation
+ *  - HEADER2:  covers header fields while header signing is inactive
+ * The last command is flagged with DCERPC_SEC_VT_COMMAND_END.
+ *
+ * The encoded trailer is stored in state->request.trailer, padded at
+ * the FRONT so that (stub data + padding + trailer) puts the trailer
+ * on a 4-byte boundary. On failure the tevent_req is marked with an
+ * error, which the caller detects via tevent_req_is_in_progress().
+ */
+static void dcerpc_do_request_verification_trailer(struct tevent_req *req)
+{
+	struct dcerpc_do_request_state *state =
+		tevent_req_data(req,
+		struct dcerpc_do_request_state);
+	struct dcerpc_sec_verification_trailer *t;
+	struct dcerpc_sec_vt *c = NULL;
+	struct ndr_push *ndr = NULL;
+	enum ndr_err_code ndr_err;
+	size_t align = 0;
+	size_t pad = 0;
+
+	if (state->call->sec->auth_level < DCERPC_AUTH_LEVEL_INTEGRITY) {
+		return;
+	}
+
+	t = talloc_zero(state, struct dcerpc_sec_verification_trailer);
+	if (tevent_req_nomem(t, req)) {
+		return;
+	}
+
+	if (!state->call->sec->verified_bitmask1) {
+		t->commands = talloc_realloc(t, t->commands,
+					     struct dcerpc_sec_vt,
+					     t->count.count + 1);
+		if (tevent_req_nomem(t->commands, req)) {
+			return;
+		}
+		c = &t->commands[t->count.count++];
+		ZERO_STRUCTP(c);
+
+		c->command = DCERPC_SEC_VT_COMMAND_BITMASK1;
+		if (state->call->sec->client_hdr_signing) {
+			c->u.bitmask1 = DCERPC_SEC_VT_CLIENT_SUPPORTS_HEADER_SIGNING;
+		}
+		/* remember to latch sec->verified_bitmask1 on first reply */
+		state->verify_bitmask1 = true;
+	}
+
+	if (!state->call->pres->verified_pcontext) {
+		t->commands = talloc_realloc(t, t->commands,
+					     struct dcerpc_sec_vt,
+					     t->count.count + 1);
+		if (tevent_req_nomem(t->commands, req)) {
+			return;
+		}
+		c = &t->commands[t->count.count++];
+		ZERO_STRUCTP(c);
+
+		c->command = DCERPC_SEC_VT_COMMAND_PCONTEXT;
+		c->u.pcontext.abstract_syntax = state->call->pres->table->syntax_id;
+		c->u.pcontext.transfer_syntax = state->call->pres->transfer;
+
+		/* remember to latch pres->verified_pcontext on first reply */
+		state->verify_pcontext = true;
+	}
+
+	if (!state->call->sec->hdr_signing) {
+		t->commands = talloc_realloc(t, t->commands,
+					     struct dcerpc_sec_vt,
+					     t->count.count + 1);
+		if (tevent_req_nomem(t->commands, req)) {
+			return;
+		}
+		c = &t->commands[t->count.count++];
+		ZERO_STRUCTP(c);
+
+		/* mirror the header fields the signature can't cover */
+		c->command = DCERPC_SEC_VT_COMMAND_HEADER2;
+		c->u.header2.ptype = DCERPC_PKT_REQUEST;
+		if (state->request.bigendian) {
+			c->u.header2.drep[0] = 0;
+		} else {
+			c->u.header2.drep[0] = DCERPC_DREP_LE;
+		}
+		c->u.header2.drep[1] = 0;
+		c->u.header2.drep[2] = 0;
+		c->u.header2.drep[3] = 0;
+		c->u.header2.call_id = state->call->call_id;
+		c->u.header2.context_id = 0;
+		c->u.header2.opnum = state->opnum;
+	}
+
+	/* everything already verified: no trailer needed */
+	if (t->count.count == 0) {
+		TALLOC_FREE(t);
+		return;
+	}
+
+	c = &t->commands[t->count.count - 1];
+	c->command |= DCERPC_SEC_VT_COMMAND_END;
+
+	if (DEBUGLEVEL >= 10) {
+		NDR_PRINT_DEBUG(dcerpc_sec_verification_trailer, t);
+	}
+
+	ndr = ndr_push_init_ctx(state);
+	if (tevent_req_nomem(ndr, req)) {
+		return;
+	}
+
+	//TODO if (state->request.bigendian)
+
+	ndr_err = ndr_push_dcerpc_sec_verification_trailer(ndr,
+						NDR_SCALARS | NDR_BUFFERS,
+						t);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		NTSTATUS status = ndr_map_error2ntstatus(ndr_err);
+		tevent_req_nterror(req, status);
+		return;
+	}
+	state->request.trailer = ndr_push_blob(ndr);
+
+	/* pad so the trailer starts 4-byte aligned after the stub data */
+	align = state->request.blob->length & 0x3;
+	if (align > 0) {
+		pad = 4 - align;
+	}
+	if (pad > 0) {
+		bool ok;
+		uint8_t *p;
+		const uint8_t zeros[4] = { 0, };
+
+		ok = data_blob_append(ndr, &state->request.trailer, zeros, pad);
+		if (!ok) {
+			tevent_req_oom(req);
+			return;
+		}
+
+		/* move the padding to the start */
+		p = state->request.trailer.data;
+		memmove(p + pad, p, state->request.trailer.length - pad);
+		memset(p, 0, pad);
+	}
+
+	return;
+}
+
+static void dcerpc_do_request_out_frag_trans_wait1(struct tevent_req *subreq);
static void dcerpc_do_request_out_frag_done(struct tevent_req *subreq);
+static void dcerpc_do_request_out_frag_trans_wait2(struct tevent_req *subreq);
static void dcerpc_do_request_out_frag_next(struct tevent_req *req,
void *private_data)
tevent_req_data(req,
struct dcerpc_do_request_state);
struct dcerpc_do_request_out_frag *frag;
- size_t data_sent_thistime;
- size_t hdr_len = DCERPC_PKT_REQUEST;
+ size_t hdr_len = DCERPC_REQUEST_LENGTH;
size_t auth_len;
size_t frag_len;
uint8_t flags = 0;
size_t pad_len;
size_t data_left;
+ size_t data_thistime;
+ size_t trailer_left;
+ size_t trailer_thistime = 0;
+ size_t total_left;
+ size_t total_thistime;
NTSTATUS status;
union dcerpc_payload u;
- DATA_BLOB payload;
bool ok;
struct tevent_req *subreq;
+ bool use_trans = true;
if (state->object) {
flags |= DCERPC_PFC_FLAG_OBJECT_UUID;
if (tevent_req_nomem(frag, req)) {
return;
}
-
+ frag->ev = state->ev;
+ frag->conn = state->conn;
frag->req = req;
state->out_frag = frag;
data_left = state->request.blob->length - state->request.ofs;
+ trailer_left = state->request.trailer.length - state->request.trailer_ofs;
+ total_left = data_left + trailer_left;
+ if (total_left < data_left || total_left < trailer_left) {
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
+ return;
+ }
status = dcerpc_guess_pdu_sizes(state->call->sec,
- hdr_len, data_left,
+ hdr_len, total_left,
state->conn->features.max_xmit_frag,
16,//TODO
- &data_sent_thistime,
+ &total_thistime,
&frag_len, &auth_len, &pad_len);
if (!NT_STATUS_IS_OK(status)) {
tevent_req_nterror(req, status);
flags |= DCERPC_PFC_FLAG_FIRST;
}
- payload.data = state->request.blob->data + state->request.ofs;
- payload.length = data_sent_thistime;
-
- state->request.ofs += data_sent_thistime;
-
- if (state->request.blob->length == state->request.ofs) {
+ if (total_thistime == total_left) {
flags |= DCERPC_PFC_FLAG_LAST;
}
+ data_thistime = MIN(total_thistime, data_left);
+ if (data_thistime < total_thistime) {
+ trailer_thistime = total_thistime - data_thistime;
+ }
+
ZERO_STRUCT(u.request);
- u.request.alloc_hint = state->request.blob->length;// TODO
+ u.request.alloc_hint = total_left;
u.request.context_id = state->call->pres->context_id;
u.request.opnum = state->opnum;
if (state->object) {
* at this stage */
dcerpc_set_frag_length(&frag->blob, frag_len);
- /* Copy in the data. */
- ok = data_blob_append(frag, &frag->blob,
- payload.data, payload.length);
- if (!ok) {
- tevent_req_nomem(NULL, req);
- return;
+ if (data_thistime > 0) {
+ const uint8_t *data_ptr;
+
+ data_ptr = state->request.blob->data;
+ data_ptr += state->request.ofs;
+
+ /* Copy in the data. */
+ ok = data_blob_append(frag, &frag->blob,
+ data_ptr, data_thistime);
+ if (!ok) {
+ tevent_req_oom(req);
+ return;
+ }
+
+ state->request.ofs += data_thistime;
+ }
+
+ if (trailer_thistime > 0) {
+ const uint8_t *trailer_ptr;
+
+ trailer_ptr = state->request.trailer.data;
+ trailer_ptr += state->request.trailer_ofs;
+
+ /* Copy in the data. */
+ ok = data_blob_append(frag, &frag->blob,
+ trailer_ptr, trailer_thistime);
+ if (!ok) {
+ tevent_req_oom(req);
+ return;
+ }
+
+ state->request.trailer_ofs += trailer_thistime;
}
switch (state->call->sec->auth_level) {
frag->is_last = ((flags & DCERPC_PFC_FLAG_LAST) != 0);
+ if (!frag->is_last) {
+ use_trans = false;
+ }
+
+ if (frag->conn->transport.use_trans_fn == NULL) {
+ use_trans = false;
+ }
+
+ if (frag->conn->loop.subreq != NULL) {
+ use_trans = false;
+ }
+
+ if (frag->conn->features.concurrent_multiplex) {
+ use_trans = false;
+ }
+
+ if (tevent_queue_length(frag->conn->calls.out_queue) > 1) {
+ use_trans = false;
+ }
+
+ if (use_trans) {
+ frag->subreq_wait1 = tevent_queue_wait_send(frag,
+ frag->ev,
+ frag->conn->transport.write_queue);
+ if (tevent_req_nomem(req, frag->subreq_wait1)) {
+ return;
+ }
+ tevent_req_set_callback(frag->subreq_wait1,
+ dcerpc_do_request_out_frag_trans_wait1,
+ frag);
+ /*
+ * we need to block reads until our write is
+ * the next in the write queue.
+ */
+ frag->conn->loop.subreq = frag->subreq_wait1;
+ frag->conn->loop.ev = frag->ev;
+ }
+
/*
- * TODO: add smb_trans handling for ncacn_np
- *
* We need to add a dcerpc_write_fragment_queue_send/recv()
*/
frag->vector.iov_base = frag->blob.data;
frag->vector.iov_len = frag->blob.length;
- subreq = tstream_writev_queue_send(frag, state->ev,
- state->conn->transport.stream,
- state->conn->transport.write_queue,
+ subreq = tstream_writev_queue_send(frag, frag->ev,
+ frag->conn->transport.stream,
+ frag->conn->transport.write_queue,
&frag->vector, 1);
if (tevent_req_nomem(subreq, req)) {
return;
tevent_req_set_callback(subreq,
dcerpc_do_request_out_frag_done,
frag);
+
+ if (use_trans) {
+ frag->subreq_wait2 = tevent_queue_wait_send(frag,
+ frag->ev,
+ frag->conn->transport.write_queue);
+ if (tevent_req_nomem(req, frag->subreq_wait2)) {
+ return;
+ }
+ tevent_req_set_callback(frag->subreq_wait2,
+ dcerpc_do_request_out_frag_trans_wait2,
+ frag);
+ }
+
+ if (!frag->is_last) {
+ return;
+ }
+
+ status = dcerpc_connection_loop_restart(frag->conn, frag->ev);
+ if (tevent_req_nterror(req, status)) {
+ return;
+ }
+}
+
+/*
+ * wait1 stage of the trans optimization for a request fragment: fires
+ * when our entry reaches the head of the transport write queue.
+ *
+ * If the queue holds only our three entries (wait1, writev, wait2) we
+ * switch the stream into trans mode and restart the read loop here;
+ * otherwise more writes are pending, we skip the trans call and let
+ * the wait2 stage restart the read instead.
+ *
+ * NOTE: frag may outlive the originating req (req can be NULL after
+ * the caller's cleanup hook ran), so every error path checks req.
+ */
+static void dcerpc_do_request_out_frag_trans_wait1(struct tevent_req *subreq)
+{
+	struct dcerpc_do_request_out_frag *frag =
+		tevent_req_callback_data(subreq,
+		struct dcerpc_do_request_out_frag);
+	struct tevent_req *req = frag->req;
+	NTSTATUS status;
+	bool ok;
+
+	/*
+	 * TODO: what if the caller has been free'ed?
+	 */
+
+	/* we no longer block the connection's read loop */
+	frag->subreq_wait1 = NULL;
+	frag->conn->loop.subreq = NULL;
+
+	ok = tevent_queue_wait_recv(subreq);
+	if (!ok) {
+		status = NT_STATUS_INTERNAL_ERROR;
+		TALLOC_FREE(frag);
+		if (req) {
+			tevent_req_nterror(req, status);
+		}
+		//dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+		return;
+	}
+
+	if (tevent_queue_length(frag->conn->transport.write_queue) > 3) {
+		/*
+		 * We added 3 entries into the queue,
+		 * wait1, writev and wait2.
+		 *
+		 * There's more to write, we should not block
+		 * further writev calls for a trans call.
+		 *
+		 * The wait2 stage will trigger the read.
+		 */
+		TALLOC_FREE(subreq);
+		return;
+	}
+
+	/*
+	 * we don't need wait2 anymore, we're sure that
+	 * we'll do a trans call.
+	 */
+	TALLOC_FREE(frag->subreq_wait2);
+
+	status = frag->conn->transport.use_trans_fn(frag->conn->transport.stream);
+	if (!NT_STATUS_IS_OK(status)) {
+		TALLOC_FREE(frag);
+		if (req) {
+			tevent_req_nterror(req, status);
+		}
+		//dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+		return;
+	}
+
+	/* we free subreq after tstream_cli_np_use_trans */
+	TALLOC_FREE(subreq);
+
+	status = dcerpc_connection_loop_restart(frag->conn, frag->ev);
+	if (!NT_STATUS_IS_OK(status)) {
+		TALLOC_FREE(frag);
+		if (req) {
+			tevent_req_nterror(req, status);
+		}
+		//dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+		return;
+	}
}
static void dcerpc_do_request_out_frag_done(struct tevent_req *subreq)
return;
}
+ /*
+ * The wait2 queue entry is still pending: its callback will take
+ * over -- it restarts the connection loop and frees frag -- so do
+ * not continue or free anything here.
+ */
+ if (frag->subreq_wait2 != NULL) {
+ return;
+ }
+
if (frag->is_last) {
TALLOC_FREE(frag);
return;
dcerpc_do_request_out_frag_next(req, NULL);
}
+/*
+ * Completion handler for the trailing queue-blocker ("wait2").
+ *
+ * This only runs when the wait1 stage decided against a trans call
+ * (otherwise wait1 frees subreq_wait2 early).  By the time wait2 fires
+ * the last fragment has been written, so restart the connection loop
+ * and wait for the incoming response pdus.
+ */
+static void dcerpc_do_request_out_frag_trans_wait2(struct tevent_req *subreq)
+{
+ struct dcerpc_do_request_out_frag *frag =
+ tevent_req_callback_data(subreq,
+ struct dcerpc_do_request_out_frag);
+ struct tevent_req *req = frag->req;
+ NTSTATUS status;
+ bool ok;
+
+ frag->subreq_wait2 = NULL;
+
+ ok = tevent_queue_wait_recv(subreq);
+ if (!ok) {
+ /* queue wait failed: treat as fatal and fail the request */
+ status = NT_STATUS_INTERNAL_ERROR;
+ TALLOC_FREE(frag);
+ if (req) {
+ tevent_req_nterror(req, status);
+ }
+ //dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+ return;
+ }
+
+ TALLOC_FREE(subreq);
+
+ status = dcerpc_connection_loop_restart(frag->conn, frag->ev);
+ if (!NT_STATUS_IS_OK(status)) {
+ TALLOC_FREE(frag);
+ if (req) {
+ tevent_req_nterror(req, status);
+ }
+ //dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+ return;
+ }
+
+ /* all done with the outgoing side; frag is no longer needed */
+ TALLOC_FREE(frag);
+
+ /* we need to wait for incoming pdus */
+}
+
static NTSTATUS dcerpc_do_request_handle_in_frag(void *private_data,
struct ncacn_packet *pkt,
DATA_BLOB frag)
return error;
}
+ if (!state->got_first) {
+ state->got_first = true;
+
+ if (pkt->drep[0] & DCERPC_DREP_LE) {
+ state->response.bigendian = false;
+ } else {
+ state->response.bigendian = true;
+ }
+
+ if (state->verify_bitmask1) {
+ state->call->sec->verified_bitmask1 = true;
+ }
+
+ if (state->verify_pcontext) {
+ state->call->pres->verified_pcontext = true;
+ }
+ }
+
+ if (state->response.bigendian) {
+ if (pkt->drep[0] != 0) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+ } else {
+ if (pkt->drep[0] != DCERPC_DREP_LE) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+ }
+ if (pkt->drep[1] != 0) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+ if (pkt->drep[2] != 0) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+ if (pkt->drep[3] != 0) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+
+ if (pkt->u.response.context_id != state->call->pres->context_id) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+
if (frag.length < DCERPC_RESPONSE_LENGTH + pad_len) {
return NT_STATUS_RPC_PROTOCOL_ERROR;
}
payload.length = frag.length - DCERPC_RESPONSE_LENGTH;
}
- if (pkt->pfc_flags & DCERPC_PFC_FLAG_LAST) {
- if (pkt->drep[0] & DCERPC_DREP_LE) {
- state->response.bigendian = false;
- } else {
- state->response.bigendian = true;
- }
- }
-
DEBUG(10, ("Got pdu len %lu, data_len %lu, ss_len %u\n",
(long unsigned int)frag.length,
(long unsigned int)payload.length,
tevent_req_done(req);//TODO
return NT_STATUS_OK;
}
- return NT_STATUS_OK;
+
+ return dcerpc_connection_loop_restart(state->conn, state->ev);
case DCERPC_PKT_FAULT: