/*
   Unix SMB/CIFS implementation.

   SMB2 client transport context management functions

   Copyright (C) Andrew Tridgell 2005

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/raw/raw_proto.h"
#include "libcli/smb2/smb2.h"
#include "libcli/smb2/smb2_calls.h"
#include "lib/socket/socket.h"
#include "lib/events/events.h"
#include "lib/stream/packet.h"
#include "lib/util/dlinklist.h"
34 an event has happened on the socket
36 static void smb2_transport_event_handler(struct event_context *ev,
38 uint16_t flags, void *private)
40 struct smb2_transport *transport = talloc_get_type(private,
41 struct smb2_transport);
42 if (flags & EVENT_FD_READ) {
43 packet_recv(transport->packet);
46 if (flags & EVENT_FD_WRITE) {
47 packet_queue_run(transport->packet);
54 static int transport_destructor(struct smb2_transport *transport)
56 smb2_transport_dead(transport, NT_STATUS_LOCAL_DISCONNECT);
64 static void smb2_transport_error(void *private, NTSTATUS status)
66 struct smb2_transport *transport = talloc_get_type(private,
67 struct smb2_transport);
68 smb2_transport_dead(transport, status);
71 static NTSTATUS smb2_transport_finish_recv(void *private, DATA_BLOB blob);
74 create a transport structure based on an established socket
76 struct smb2_transport *smb2_transport_init(struct smbcli_socket *sock,
77 TALLOC_CTX *parent_ctx)
79 struct smb2_transport *transport;
81 transport = talloc_zero(parent_ctx, struct smb2_transport);
82 if (!transport) return NULL;
84 transport->socket = talloc_steal(transport, sock);
86 /* setup the stream -> packet parser */
87 transport->packet = packet_init(transport);
88 if (transport->packet == NULL) {
89 talloc_free(transport);
92 packet_set_private(transport->packet, transport);
93 packet_set_socket(transport->packet, transport->socket->sock);
94 packet_set_callback(transport->packet, smb2_transport_finish_recv);
95 packet_set_full_request(transport->packet, packet_full_request_nbt);
96 packet_set_error_handler(transport->packet, smb2_transport_error);
97 packet_set_event_context(transport->packet, transport->socket->event.ctx);
98 packet_set_nofree(transport->packet);
100 /* take over event handling from the socket layer - it only
101 handles events up until we are connected */
102 talloc_free(transport->socket->event.fde);
103 transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
105 socket_get_fd(transport->socket->sock),
107 smb2_transport_event_handler,
110 packet_set_fde(transport->packet, transport->socket->event.fde);
111 packet_set_serialise(transport->packet);
113 talloc_set_destructor(transport, transport_destructor);
115 transport->options.timeout = 30;
121 mark the transport as dead
123 void smb2_transport_dead(struct smb2_transport *transport, NTSTATUS status)
125 smbcli_sock_dead(transport->socket);
127 if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
128 status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
131 /* kill all pending receives */
132 while (transport->pending_recv) {
133 struct smb2_request *req = transport->pending_recv;
134 req->state = SMB2_REQUEST_ERROR;
135 req->status = status;
136 DLIST_REMOVE(transport->pending_recv, req);
143 static bool smb2_handle_oplock_break(struct smb2_transport *transport,
144 const DATA_BLOB *blob)
150 hdr = blob->data+NBT_HDR_SIZE;
152 if (blob->length < (SMB2_MIN_SIZE+0x18)) {
153 DEBUG(1,("Discarding smb2 oplock reply of size %u\n",
158 opcode = SVAL(hdr, SMB2_HDR_OPCODE);
159 seqnum = BVAL(hdr, SMB2_HDR_MESSAGE_ID);
161 if ((opcode != SMB2_OP_BREAK) ||
162 (seqnum != UINT64_MAX)) {
166 if (transport->oplock.handler) {
167 uint8_t *body = hdr+SMB2_HDR_BODY;
168 struct smb2_handle h;
171 level = CVAL(body, 0x02);
172 smb2_pull_handle(body+0x08, &h);
174 transport->oplock.handler(transport, &h, level,
175 transport->oplock.private_data);
182 we have a full request in our receive buffer - match it to a pending request
185 static NTSTATUS smb2_transport_finish_recv(void *private, DATA_BLOB blob)
187 struct smb2_transport *transport = talloc_get_type(private,
188 struct smb2_transport);
189 uint8_t *buffer, *hdr;
191 struct smb2_request *req = NULL;
194 uint16_t buffer_code;
195 uint32_t dynamic_size;
201 hdr = buffer+NBT_HDR_SIZE;
203 if (len < SMB2_MIN_SIZE) {
204 DEBUG(1,("Discarding smb2 reply of size %d\n", len));
208 if (smb2_handle_oplock_break(transport, &blob)) {
213 flags = IVAL(hdr, SMB2_HDR_FLAGS);
214 seqnum = BVAL(hdr, SMB2_HDR_MESSAGE_ID);
216 /* match the incoming request against the list of pending requests */
217 for (req=transport->pending_recv; req; req=req->next) {
218 if (req->seqnum == seqnum) break;
222 DEBUG(1,("Discarding unmatched reply with seqnum 0x%llx op %d\n",
223 (long long)seqnum, SVAL(hdr, SMB2_HDR_OPCODE)));
227 /* fill in the 'in' portion of the matching request */
228 req->in.buffer = buffer;
229 talloc_steal(req, buffer);
231 req->in.allocated = req->in.size;
234 req->in.body = hdr+SMB2_HDR_BODY;
235 req->in.body_size = req->in.size - (SMB2_HDR_BODY+NBT_HDR_SIZE);
236 req->status = NT_STATUS(IVAL(hdr, SMB2_HDR_STATUS));
238 if (NT_STATUS_EQUAL(req->status, STATUS_PENDING)) {
239 if (flags & 0x00000002) {
240 req->cancel.can_cancel = true;
241 req->cancel.pending_id = IVAL(hdr, SMB2_HDR_PID);
242 for (i=0; i< req->cancel.do_cancel; i++) {
250 buffer_code = SVAL(req->in.body, 0);
251 req->in.body_fixed = (buffer_code & ~1);
252 req->in.dynamic = NULL;
253 dynamic_size = req->in.body_size - req->in.body_fixed;
254 if (dynamic_size != 0 && (buffer_code & 1)) {
255 req->in.dynamic = req->in.body + req->in.body_fixed;
256 if (smb2_oob(&req->in, req->in.dynamic, dynamic_size)) {
257 DEBUG(1,("SMB2 request invalid dynamic size 0x%x\n",
263 smb2_setup_bufinfo(req);
265 DEBUG(2, ("SMB2 RECV seqnum=0x%llx\n", (long long)req->seqnum));
266 dump_data(5, req->in.body, req->in.body_size);
268 /* if this request has an async handler then call that to
269 notify that the reply has been received. This might destroy
270 the request so it must happen last */
271 DLIST_REMOVE(transport->pending_recv, req);
272 req->state = SMB2_REQUEST_DONE;
279 dump_data(5, buffer, len);
281 DLIST_REMOVE(transport->pending_recv, req);
282 req->state = SMB2_REQUEST_ERROR;
289 return NT_STATUS_UNSUCCESSFUL;
293 handle timeouts of individual smb requests
295 static void smb2_timeout_handler(struct event_context *ev, struct timed_event *te,
296 struct timeval t, void *private)
298 struct smb2_request *req = talloc_get_type(private, struct smb2_request);
300 if (req->state == SMB2_REQUEST_RECV) {
301 DLIST_REMOVE(req->transport->pending_recv, req);
303 req->status = NT_STATUS_IO_TIMEOUT;
304 req->state = SMB2_REQUEST_ERROR;
314 static int smb2_request_destructor(struct smb2_request *req)
316 if (req->state == SMB2_REQUEST_RECV) {
317 DLIST_REMOVE(req->transport->pending_recv, req);
324 put a request into the send queue
326 void smb2_transport_send(struct smb2_request *req)
331 _smb2_setlen(req->out.buffer, req->out.size - NBT_HDR_SIZE);
333 DEBUG(2, ("SMB2 send seqnum=0x%llx\n", (long long)req->seqnum));
334 dump_data(5, req->out.body, req->out.body_size);
336 /* check if the transport is dead */
337 if (req->transport->socket->sock == NULL) {
338 req->state = SMB2_REQUEST_ERROR;
339 req->status = NT_STATUS_NET_WRITE_FAULT;
343 blob = data_blob_const(req->out.buffer, req->out.size);
344 status = packet_send(req->transport->packet, blob);
345 if (!NT_STATUS_IS_OK(status)) {
346 req->state = SMB2_REQUEST_ERROR;
347 req->status = status;
351 req->state = SMB2_REQUEST_RECV;
352 DLIST_ADD(req->transport->pending_recv, req);
355 if (req->transport->options.timeout) {
356 event_add_timed(req->transport->socket->event.ctx, req,
357 timeval_current_ofs(req->transport->options.timeout, 0),
358 smb2_timeout_handler, req);
361 talloc_set_destructor(req, smb2_request_destructor);
364 static void idle_handler(struct event_context *ev,
365 struct timed_event *te, struct timeval t, void *private)
367 struct smb2_transport *transport = talloc_get_type(private,
368 struct smb2_transport);
369 struct timeval next = timeval_add(&t, 0, transport->idle.period);
370 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
373 idle_handler, transport);
374 transport->idle.func(transport, transport->idle.private);
378 setup the idle handler for a transport
379 the period is in microseconds
381 void smb2_transport_idle_handler(struct smb2_transport *transport,
382 void (*idle_func)(struct smb2_transport *, void *),
386 transport->idle.func = idle_func;
387 transport->idle.private = private;
388 transport->idle.period = period;
390 if (transport->socket->event.te != NULL) {
391 talloc_free(transport->socket->event.te);
394 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
396 timeval_current_ofs(0, period),
397 idle_handler, transport);