2 * Socket and pipe I/O utilities used in rsync.
4 * Copyright (C) 1996-2001 Andrew Tridgell
5 * Copyright (C) 1996 Paul Mackerras
6 * Copyright (C) 2001, 2002 Martin Pool <mbp@samba.org>
7 * Copyright (C) 2003-2022 Wayne Davison
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 3 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, visit the http://fsf.org website.
23 /* Rsync provides its own multiplexing system, which is used to send
24 * stderr and stdout over a single socket.
26 * For historical reasons this is off during the start of the
27 * connection, but it's switched on quite early using
28 * io_start_multiplex_out() and io_start_multiplex_in(). */
34 /** If no timeout is specified then use a 60 second select timeout */
35 #define SELECT_TIMEOUT 60
38 extern size_t bwlimit_writemax;
39 extern int io_timeout;
42 extern int am_receiver;
43 extern int am_generator;
44 extern int msgs2stderr;
45 extern int inc_recurse;
50 extern int file_total;
51 extern int file_old_total;
53 extern int read_batch;
54 extern int compat_flags;
55 extern int protect_args;
56 extern int checksum_seed;
57 extern int daemon_connection;
58 extern int protocol_version;
59 extern int remove_source_files;
60 extern int preserve_hard_links;
61 extern BOOL extra_flist_sending_enabled;
62 extern BOOL flush_ok_after_signal;
63 extern struct stats stats;
64 extern time_t stop_at_utime;
65 extern struct file_list *cur_flist;
67 extern int filesfrom_convert;
68 extern iconv_t ic_send, ic_recv;
71 int csum_length = SHORT_SUM_LENGTH; /* initial value */
74 int forward_flist_data = 0;
75 BOOL flist_receiving_enabled = False;
77 /* Ignore an EOF error if non-zero. See whine_about_eof(). */
78 int kluge_around_eof = 0;
79 int got_kill_signal = -1; /* is set to 0 only after multiplexed I/O starts */
84 int64 total_data_read = 0;
85 int64 total_data_written = 0;
90 int out_fd; /* Both "out" and "msg" go to this fd. */
92 unsigned out_empty_len;
93 size_t raw_data_header_pos; /* in the out xbuf */
94 size_t raw_flushing_ends_before; /* in the out xbuf */
95 size_t raw_input_ends_before; /* in the in xbuf */
96 } iobuf = { .in_fd = -1, .out_fd = -1 };
98 static time_t last_io_in;
99 static time_t last_io_out;
101 static int write_batch_monitor_in = -1;
102 static int write_batch_monitor_out = -1;
104 static int ff_forward_fd = -1;
105 static int ff_reenable_multiplex = -1;
106 static char ff_lastchar = '\0';
107 static xbuf ff_xb = EMPTY_XBUF;
109 static xbuf iconv_buf = EMPTY_XBUF;
111 static int select_timeout = SELECT_TIMEOUT;
112 static int active_filecnt = 0;
113 static OFF_T active_bytecnt = 0;
114 static int first_message = 1;
116 static char int_byte_extra[64] = {
117 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (00 - 3F)/4 */
118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (40 - 7F)/4 */
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* (80 - BF)/4 */
120 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6, /* (C0 - FF)/4 */
123 /* Our I/O buffers are sized with no bits on in the lowest byte of the "size"
124 * (indeed, our rounding of sizes in 1024-byte units assures more than this).
125 * This allows the code that is storing bytes near the physical end of a
126 * circular buffer to temporarily reduce the buffer's size (in order to make
127 * some storing idioms easier), while also making it simple to restore the
128 * buffer's actual size when the buffer's "pos" wraps around to the start (we
129 * just round the buffer's size up again). */
131 #define IOBUF_WAS_REDUCED(siz) ((siz) & 0xFF)
132 #define IOBUF_RESTORE_SIZE(siz) (((siz) | 0xFF) + 1)
134 #define IN_MULTIPLEXED (iobuf.in_multiplexed != 0)
135 #define IN_MULTIPLEXED_AND_READY (iobuf.in_multiplexed > 0)
136 #define OUT_MULTIPLEXED (iobuf.out_empty_len != 0)
138 #define PIO_NEED_INPUT (1<<0) /* The *_NEED_* flags are mutually exclusive. */
139 #define PIO_NEED_OUTROOM (1<<1)
140 #define PIO_NEED_MSGROOM (1<<2)
142 #define PIO_CONSUME_INPUT (1<<4) /* Must be combined with PIO_NEED_INPUT. */
144 #define PIO_INPUT_AND_CONSUME (PIO_NEED_INPUT | PIO_CONSUME_INPUT)
145 #define PIO_NEED_FLAGS (PIO_NEED_INPUT | PIO_NEED_OUTROOM | PIO_NEED_MSGROOM)
147 #define REMOTE_OPTION_ERROR "rsync: on remote machine: -"
148 #define REMOTE_OPTION_ERROR2 ": unknown option"
150 #define FILESFROM_BUFLEN 2048
152 enum festatus { FES_SUCCESS, FES_REDO, FES_NO_SEND };
154 static flist_ndx_list redo_list, hlink_list;
156 static void read_a_msg(void);
157 static void drain_multiplex_messages(void);
158 static void sleep_for_bwlimit(int bytes_written);
/* Enforce the --timeout option: if (t - last successful I/O time) reaches
 * io_timeout, print a diagnostic and exit_cleanup(RERR_TIMEOUT).  May first
 * queue a keep-alive message (which can add data to iobuf.msg w/o flushing).
 * NOTE(review): this listing is line-sampled; interior lines (including the
 * declarations of t and chk, and several closing braces) are missing here. */
160 static void check_timeout(BOOL allow_keepalive, int keepalive_flags)
164 	/* On the receiving side, the generator is now the one that decides
165 	 * when a timeout has occurred. When it is sifting through a lot of
166 	 * files looking for work, it will be sending keep-alive messages to
167 	 * the sender, and even though the receiver won't be sending/receiving
168 	 * anything (not even keep-alive messages), the successful writes to
169 	 * the sender will keep things going. If the receiver is actively
170 	 * receiving data, it will ensure that the generator knows that it is
171 	 * not idle by sending the generator keep-alive messages (since the
172 	 * generator might be blocked trying to send checksums, it needs to
173 	 * know that the receiver is active). Thus, as long as one or the
174 	 * other is successfully doing work, the generator will not timeout. */
180 	if (allow_keepalive) {
181 		/* This may put data into iobuf.msg w/o flushing. */
182 		maybe_send_keepalive(t, keepalive_flags);
	/* The most recent successful I/O in either direction counts. */
191 	chk = MAX(last_io_out, last_io_in);
192 	if (t - chk >= io_timeout) {
195 		rprintf(FERROR, "[%s] io timeout after %d seconds -- exiting\n",
196 			who_am_i(), (int)(t-chk));
197 		exit_cleanup(RERR_TIMEOUT);
201 /* It's almost always an error to get an EOF when we're trying to read from the
202  * network, because the protocol is (for the most part) self-terminating.
204  * There is one case for the receiver when it is at the end of the transfer
205  * (hanging around reading any keep-alive packets that might come its way): if
206  * the sender dies before the generator's kill-signal comes through, we can end
207  * up here needing to loop until the kill-signal arrives. In this situation,
208  * kluge_around_eof will be < 0.
210  * There is another case for older protocol versions (< 24) where the module
211  * listing was not terminated, so we must ignore an EOF error in that case and
212  * exit. In this situation, kluge_around_eof will be > 0. */
/* Report an unexpected EOF and exit with RERR_STREAMIO, unless one of the
 * kluge_around_eof cases described above applies.  NOTE(review): sampled
 * extract -- the loop body at orig lines 221-223 and the kluge exit path
 * are missing from this view. */
213 static NORETURN void whine_about_eof(BOOL allow_kluge)
215 	if (kluge_around_eof && allow_kluge) {
217 		if (kluge_around_eof > 0)
219 		/* If we're still here after 10 seconds, exit with an error. */
220 		for (i = 10*1000/20; i--; )
224 	rprintf(FERROR, RSYNC_NAME ": connection unexpectedly closed "
225 		"(%s bytes received so far) [%s]\n",
226 		big_num(stats.total_read), who_am_i());
228 	exit_cleanup(RERR_STREAMIO);
231 /* Do a safe read, handling any needed looping and error handling.
232  * Returns the count of the bytes read, which will only be different
233  * from "len" if we encountered an EOF. This routine is not used on
234  * the socket except very early in the transfer. */
235 static size_t safe_read(int fd, char *buf, size_t len)
	/* Must not be used on the buffered-I/O input fd (see assert). */
239 	assert(fd != iobuf.in_fd);
	/* select() with a timeout so a stalled peer triggers check_timeout(). */
250 		tv.tv_sec = select_timeout;
253 		cnt = select(fd+1, &r_fds, NULL, &e_fds, &tv);
		/* EBADF from select is unrecoverable; a plain timeout just
		 * feeds the keep-alive/timeout bookkeeping and retries. */
255 			if (cnt < 0 && errno == EBADF) {
256 				rsyserr(FERROR, errno, "safe_read select failed");
257 				exit_cleanup(RERR_FILEIO);
259 			check_timeout(1, MSK_ALLOW_FLUSH);
263 		/*if (FD_ISSET(fd, &e_fds))
264 			rprintf(FINFO, "select exception on fd %d\n", fd); */
266 		if (FD_ISSET(fd, &r_fds)) {
267 			ssize_t n = read(fd, buf + got, len - got);
268 			if (DEBUG_GTE(IO, 2)) {
269 				rprintf(FINFO, "[%s] safe_read(%d)=%" SIZE_T_FMT_MOD "d\n",
270 					who_am_i(), fd, (SIZE_T_FMT_CAST)n);
			/* NOTE(review): the n<=0 handling at orig lines 271-276
			 * is missing from this sampled extract. */
277 				rsyserr(FERROR, errno, "safe_read failed to read %" SIZE_T_FMT_MOD "d bytes",
278 					(SIZE_T_FMT_CAST)len);
279 				exit_cleanup(RERR_STREAMIO);
			/* Accumulate until the full len arrives (or EOF broke out). */
281 			if ((got += (size_t)n) == len)
/* Return a short human-readable name for an output fd, used in error
 * messages (e.g. by safe_write).  Falls back to "fd N" via a static-style
 * buffer (declaration not visible in this sampled extract). */
289 static const char *what_fd_is(int fd)
293 	if (fd == sock_f_out)
295 	else if (fd == iobuf.out_fd)
297 	else if (fd == batch_fd)
300 		snprintf(buf, sizeof buf, "fd %d", fd);
305 /* Do a safe write, handling any needed looping and error handling.
306  * Returns only if everything was successfully written. This routine
307  * is not used on the socket except very early in the transfer. */
308 static void safe_write(int fd, const char *buf, size_t len)
	/* Must not be used on the buffered-I/O output fd (see assert). */
312 	assert(fd != iobuf.out_fd);
	/* Fast path: a single write() that takes everything. */
314 	n = write(fd, buf, len);
315 	if ((size_t)n == len)
	/* Anything other than a retryable errno is fatal. */
318 	if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) {
320 		rsyserr(FERROR, errno,
321 			"safe_write failed to write %" SIZE_T_FMT_MOD "d bytes to %s",
322 			(SIZE_T_FMT_CAST)len, what_fd_is(fd));
323 		exit_cleanup(RERR_STREAMIO);
	/* Retry loop: select() for writability, keep-alives while waiting.
	 * NOTE(review): lines 324-336, 345-350 and 353+ of the original are
	 * missing from this sampled extract (loop setup and partial-write
	 * accounting are among them). */
337 		tv.tv_sec = select_timeout;
340 		cnt = select(fd + 1, NULL, &w_fds, NULL, &tv);
342 			if (cnt < 0 && errno == EBADF) {
343 				rsyserr(FERROR, errno, "safe_write select failed on %s", what_fd_is(fd));
344 				exit_cleanup(RERR_FILEIO);
347 				maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
351 		if (FD_ISSET(fd, &w_fds)) {
352 			n = write(fd, buf, len);
364 /* This is only called when files-from data is known to be available. We read
365  * a chunk of data and put it into the output buffer. */
/* Reads from ff_forward_fd into ff_xb, normalizes CR/LF/NUL separators into
 * single '\0' terminators, optionally iconv-converts each name, records the
 * names as implied includes, and forwards the bytes to iobuf.out_fd.  On EOF
 * it writes the end marker and may re-enable multiplexed output.
 * NOTE(review): heavily line-sampled -- many interior lines (loop heads,
 * closing braces, EOF cleanup) are missing from this extract. */
366 static void forward_filesfrom_data(void)
370 	len = read(ff_forward_fd, ff_xb.buf + ff_xb.len, ff_xb.size - ff_xb.len);
372 		if (len == 0 || errno != EINTR) {
373 			/* Send end-of-file marker */
			/* Two NULs if the last char wasn't already a separator. */
375 			write_buf(iobuf.out_fd, "\0\0", ff_lastchar ? 2 : 1);
377 			if (ff_reenable_multiplex >= 0)
378 				io_start_multiplex_out(ff_reenable_multiplex);
379 			free_implied_include_partial_string();
384 	if (DEBUG_GTE(IO, 2)) {
385 		rprintf(FINFO, "[%s] files-from read=%" SIZE_T_FMT_MOD "d\n",
386 			who_am_i(), (SIZE_T_FMT_CAST)len);
394 		char *s = ff_xb.buf + len;
395 		/* Transform CR and/or LF into '\0' */
396 		while (s-- > ff_xb.buf) {
397 			if (*s == '\n' || *s == '\r')
406 			/* Last buf ended with a '\0', so don't let this buf start with one. */
407 			while (len && *s == '\0')
409 			ff_xb.pos = s - ff_xb.buf;
	/* Conversion path: convert/send each NUL-terminated name via iconv. */
413 	if (filesfrom_convert && len) {
414 		char *sob = ff_xb.buf + ff_xb.pos, *s = sob;
415 		char *eob = sob + len;
416 		int flags = ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT;
417 		if (ff_lastchar == '\0')
419 		/* Convert/send each null-terminated string separately, skipping empties. */
422 				ff_xb.len = s - sob - 1;
423 				add_implied_include(sob, 0);
424 				if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0)
425 					exit_cleanup(RERR_PROTOCOL); /* impossible? */
426 				write_buf(iobuf.out_fd, s-1, 1); /* Send the '\0'. */
427 				while (s != eob && *s == '\0')
430 				ff_xb.pos = sob - ff_xb.buf;
435 			if ((ff_xb.len = s - sob) == 0)
438 				/* Handle a partial string specially, saving any incomplete chars. */
439 				implied_include_partial_string(sob, s);
440 				flags &= ~ICB_INCLUDE_INCOMPLETE;
441 				if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0) {
443 					exit_cleanup(RERR_PROTOCOL); /* impossible? */
				/* Keep unconverted leftovers at the buffer start. */
445 				memmove(ff_xb.buf, ff_xb.buf + ff_xb.pos, ff_xb.len);
447 		ff_lastchar = 'x'; /* Anything non-zero. */
	/* Non-conversion path: compact multi-'\0' runs and forward raw bytes. */
453 		char *f = ff_xb.buf + ff_xb.pos;
457 		/* Eliminate any multi-'\0' runs. */
459 			if (!(*t++ = *f++)) {
460 				add_implied_include(cur, 0);
462 				while (f != eob && *f == '\0')
466 		implied_include_partial_string(cur, t);
468 		if ((len = t - ff_xb.buf) != 0) {
469 			/* This will not circle back to perform_io() because we only get
470 			 * called when there is plenty of room in the output buffer. */
471 			write_buf(iobuf.out_fd, ff_xb.buf, len);
/* Temporarily shrink a circular I/O buffer so a multi-byte store (e.g. a
 * 4-byte MSG_DATA header via SIVAL) never has to wrap; the paired
 * restore_iobuf_size() undoes this when .pos wraps to the start.  See the
 * IOBUF_WAS_REDUCED/IOBUF_RESTORE_SIZE comment block above. */
476 void reduce_iobuf_size(xbuf *out, size_t new_size)
478 	if (new_size < out->size) {
479 		/* Avoid weird buffer interactions by only outputting this to stderr. */
480 		if (msgs2stderr == 1 && DEBUG_GTE(IO, 4)) {
481 			const char *name = out == &iobuf.out ? "iobuf.out"
482 				: out == &iobuf.msg ? "iobuf.msg"
485 			rprintf(FINFO, "[%s] reduced size of %s (-%d)\n",
486 				who_am_i(), name, (int)(out->size - new_size));
489 		out->size = new_size;
/* Undo a reduce_iobuf_size(): if the size's low byte is non-zero the buffer
 * was reduced, so round it back up to its original 256-aligned size. */
493 void restore_iobuf_size(xbuf *out)
495 	if (IOBUF_WAS_REDUCED(out->size)) {
496 		size_t new_size = IOBUF_RESTORE_SIZE(out->size);
497 		/* Avoid weird buffer interactions by only outputting this to stderr. */
498 		if (msgs2stderr == 1 && DEBUG_GTE(IO, 4)) {
499 			const char *name = out == &iobuf.out ? "iobuf.out"
500 				: out == &iobuf.msg ? "iobuf.msg"
503 			rprintf(FINFO, "[%s] restored size of %s (+%d)\n",
504 				who_am_i(), name, (int)(new_size - out->size));
507 		out->size = new_size;
/* React to a previously-received kill signal: mark it handled (-1), record
 * whether a final flush is OK, and exit with RERR_SIGNAL. */
511 static void handle_kill_signal(BOOL flush_ok)
513 	got_kill_signal = -1;
514 	flush_ok_after_signal = flush_ok;
515 	exit_cleanup(RERR_SIGNAL);
518 /* Perform buffered input and/or output until specified conditions are met.
519  * When given a "needed" read or write request, this returns without doing any
520  * I/O if the needed input bytes or write space is already available. Once I/O
521  * is needed, this will try to do whatever reading and/or writing is currently
522  * possible, up to the maximum buffer allowances, no matter if this is a read
523  * or write request. However, the I/O stops as soon as the required input
524  * bytes or output space is available. If this is not a read request, the
525  * routine may also do some advantageous reading of messages from a multiplexed
526  * input source (which ensures that we don't jam up with everyone in their
527  * "need to write" code and nobody reading the accumulated data that would make
530  * The iobuf.in, .out and .msg buffers are all circular. Callers need to be
531  * aware that some data copies will need to be split when the bytes wrap around
532  * from the end to the start. In order to help make writing into the output
533  * buffers easier for some operations (such as the use of SIVAL() into the
534  * buffer) a buffer may be temporarily shortened by a small amount, but the
535  * original size will be automatically restored when the .pos wraps to the
536  * start. See also the 3 raw_* iobuf vars that are used in the handling of
537  * MSG_DATA bytes as they are read-from/written-into the buffers.
539  * When writing, we flush data in the following priority order:
541  * 1. Finish writing any in-progress MSG_DATA sequence from iobuf.out.
543  * 2. Write out all the messages from the message buf (if iobuf.msg is active).
544  * Yes, this means that a PIO_NEED_OUTROOM call will completely flush any
545  * messages before getting to the iobuf.out flushing (except for rule 1).
547  * 3. Write out the raw data from iobuf.out, possibly filling in the multiplexed
548  * MSG_DATA header that was pre-allocated (when output is multiplexed).
550  * TODO: items for possible future work:
552  * - Make this routine able to read the generator-to-receiver batch flow?
554  * Unlike the old routines that this replaces, it is OK to read ahead as far as
555  * we can because the read_a_msg() routine now reads its bytes out of the input
556  * buffer. In the old days, only raw data was in the input buffer, and any
557  * unused raw data in the buf would prevent the reading of socket data. */
/* NOTE(review): this is the I/O engine of the file and is line-sampled in
 * this extract -- many interior lines (FD_ZERO setup, loop heads, closing
 * braces, several else-branches) are missing, so only the visible statements
 * are annotated below.  Do not restructure from this view. */
558 static char *perform_io(size_t needed, int flags)
560 	fd_set r_fds, e_fds, w_fds;
563 	size_t empty_buf_len = 0;
	/* When the input buffer drains completely, reset .pos to 0 and keep
	 * the raw_input_ends_before marker consistent with the new origin. */
567 	if (iobuf.in.len == 0 && iobuf.in.pos != 0) {
568 		if (iobuf.raw_input_ends_before)
569 			iobuf.raw_input_ends_before -= iobuf.in.pos;
	/* Sanity-check the request against the fixed buffer sizes (the
	 * circular buffers are never resized here). */
573 	switch (flags & PIO_NEED_FLAGS) {
575 		/* We never resize the circular input buffer. */
576 		if (iobuf.in.size < needed) {
577 			rprintf(FERROR, "need to read %" SIZE_T_FMT_MOD "d bytes,"
578 				" iobuf.in.buf is only %" SIZE_T_FMT_MOD "d bytes.\n",
579 				(SIZE_T_FMT_CAST)needed, (SIZE_T_FMT_CAST)iobuf.in.size);
580 			exit_cleanup(RERR_PROTOCOL);
583 		if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) {
584 			rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d, %sinput)\n",
585 				who_am_i(), (SIZE_T_FMT_CAST)needed, flags & PIO_CONSUME_INPUT ? "consume&" : "");
589 	case PIO_NEED_OUTROOM:
590 		/* We never resize the circular output buffer. */
591 		if (iobuf.out.size - iobuf.out_empty_len < needed) {
592 			fprintf(stderr, "need to write %" SIZE_T_FMT_MOD "d bytes,"
593 				" iobuf.out.buf is only %" SIZE_T_FMT_MOD "d bytes.\n",
594 				(SIZE_T_FMT_CAST)needed, (SIZE_T_FMT_CAST)(iobuf.out.size - iobuf.out_empty_len));
595 			exit_cleanup(RERR_PROTOCOL);
598 		if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) {
599 			rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d,"
600 				" outroom) needs to flush %" SIZE_T_FMT_MOD "d\n",
601 				who_am_i(), (SIZE_T_FMT_CAST)needed,
602 				iobuf.out.len + needed > iobuf.out.size
603 				? (SIZE_T_FMT_CAST)(iobuf.out.len + needed - iobuf.out.size) : (SIZE_T_FMT_CAST)0);
607 	case PIO_NEED_MSGROOM:
608 		/* We never resize the circular message buffer. */
609 		if (iobuf.msg.size < needed) {
610 			fprintf(stderr, "need to write %" SIZE_T_FMT_MOD "d bytes,"
611 				" iobuf.msg.buf is only %" SIZE_T_FMT_MOD "d bytes.\n",
612 				(SIZE_T_FMT_CAST)needed, (SIZE_T_FMT_CAST)iobuf.msg.size);
613 			exit_cleanup(RERR_PROTOCOL);
616 		if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) {
617 			rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d,"
618 				" msgroom) needs to flush %" SIZE_T_FMT_MOD "d\n",
619 				who_am_i(), (SIZE_T_FMT_CAST)needed,
620 				iobuf.msg.len + needed > iobuf.msg.size
621 				? (SIZE_T_FMT_CAST)(iobuf.msg.len + needed - iobuf.msg.size) : (SIZE_T_FMT_CAST)0);
626 		if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) {
627 			rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d, %d)\n",
628 				who_am_i(), (SIZE_T_FMT_CAST)needed, flags);
633 		exit_cleanup(RERR_UNSUPPORTED);
	/* Main loop: return as soon as the stated need is satisfied. */
637 		switch (flags & PIO_NEED_FLAGS) {
639 			if (iobuf.in.len >= needed)
642 		case PIO_NEED_OUTROOM:
643 			/* Note that iobuf.out_empty_len doesn't factor into this check
644 			 * because iobuf.out.len already holds any needed header len. */
645 			if (iobuf.out.len + needed <= iobuf.out.size)
648 		case PIO_NEED_MSGROOM:
649 			if (iobuf.msg.len + needed <= iobuf.msg.size)
		/* Build the read set: socket/pipe input (unless batch reading
		 * has run out of batch data) ... */
658 		if (iobuf.in_fd >= 0 && iobuf.in.size - iobuf.in.len) {
659 			if (!read_batch || batch_fd >= 0) {
660 				FD_SET(iobuf.in_fd, &r_fds);
661 				FD_SET(iobuf.in_fd, &e_fds);
663 			if (iobuf.in_fd > max_fd)
664 				max_fd = iobuf.in_fd;
667 		/* Only do more filesfrom processing if there is enough room in the out buffer. */
668 		if (ff_forward_fd >= 0 && iobuf.out.size - iobuf.out.len > FILESFROM_BUFLEN*2) {
669 			FD_SET(ff_forward_fd, &r_fds);
670 			if (ff_forward_fd > max_fd)
671 				max_fd = ff_forward_fd;
		/* Build the write set, honoring the flush priority described in
		 * the header comment: in-progress MSG_DATA first, then iobuf.msg,
		 * then raw iobuf.out (stamping the pre-allocated MSG_DATA header
		 * when multiplexing). */
675 		if (iobuf.out_fd >= 0) {
676 			if (iobuf.raw_flushing_ends_before
677 			    || (!iobuf.msg.len && iobuf.out.len > iobuf.out_empty_len && !(flags & PIO_NEED_MSGROOM))) {
678 				if (OUT_MULTIPLEXED && !iobuf.raw_flushing_ends_before) {
679 					/* The iobuf.raw_flushing_ends_before value can point off the end
680 					 * of the iobuf.out buffer for a while, for easier subtracting. */
681 					iobuf.raw_flushing_ends_before = iobuf.out.pos + iobuf.out.len;
683 					SIVAL(iobuf.out.buf + iobuf.raw_data_header_pos, 0,
684 					      ((MPLEX_BASE + (int)MSG_DATA)<<24) + iobuf.out.len - 4);
686 					if (msgs2stderr == 1 && DEBUG_GTE(IO, 1)) {
687 						rprintf(FINFO, "[%s] send_msg(%d, %" SIZE_T_FMT_MOD "d)\n",
688 							who_am_i(), (int)MSG_DATA, (SIZE_T_FMT_CAST)iobuf.out.len - 4);
691 					/* reserve room for the next MSG_DATA header */
692 					iobuf.raw_data_header_pos = iobuf.raw_flushing_ends_before;
693 					if (iobuf.raw_data_header_pos >= iobuf.out.size)
694 						iobuf.raw_data_header_pos -= iobuf.out.size;
695 					else if (iobuf.raw_data_header_pos + 4 > iobuf.out.size) {
696 						/* The 4-byte header won't fit at the end of the buffer,
697 						 * so we'll temporarily reduce the output buffer's size
698 						 * and put the header at the start of the buffer. */
699 						reduce_iobuf_size(&iobuf.out, iobuf.raw_data_header_pos);
700 						iobuf.raw_data_header_pos = 0;
702 					/* Yes, it is possible for this to make len > size for a while. */
706 				empty_buf_len = iobuf.out_empty_len;
708 			} else if (iobuf.msg.len) {
714 			FD_SET(iobuf.out_fd, &w_fds);
715 			if (iobuf.out_fd > max_fd)
716 				max_fd = iobuf.out_fd;
		/* No usable fds for the stated need: diagnose and bail. */
722 			switch (flags & PIO_NEED_FLAGS) {
725 				if (kluge_around_eof == 2)
727 				if (iobuf.in_fd == -2)
728 					whine_about_eof(True);
729 				rprintf(FERROR, "error in perform_io: no fd for input.\n");
730 				exit_cleanup(RERR_PROTOCOL);
731 			case PIO_NEED_OUTROOM:
732 			case PIO_NEED_MSGROOM:
734 				drain_multiplex_messages();
735 				if (iobuf.out_fd == -2)
736 					whine_about_eof(True);
737 				rprintf(FERROR, "error in perform_io: no fd for output.\n");
738 				exit_cleanup(RERR_PROTOCOL);
740 				/* No stated needs, so I guess this is OK. */
746 		if (got_kill_signal > 0)
747 			handle_kill_signal(True);
		/* Give the generator a chance to send incremental file-list data
		 * while we would otherwise just block in select(). */
749 		if (extra_flist_sending_enabled) {
750 			if (file_total - file_old_total < MAX_FILECNT_LOOKAHEAD && IN_MULTIPLEXED_AND_READY)
753 				extra_flist_sending_enabled = False;
754 			tv.tv_sec = select_timeout;
757 			tv.tv_sec = select_timeout;
760 		cnt = select(max_fd + 1, &r_fds, &w_fds, &e_fds, &tv);
763 			if (cnt < 0 && errno == EBADF) {
765 				exit_cleanup(RERR_SOCKETIO);
767 			if (extra_flist_sending_enabled) {
768 				extra_flist_sending_enabled = False;
769 				send_extra_file_list(sock_f_out, -1);
770 				extra_flist_sending_enabled = !flist_eof;
772 				check_timeout((flags & PIO_NEED_INPUT) != 0, 0);
773 			FD_ZERO(&r_fds); /* Just in case... */
		/* Readable input: append into the circular iobuf.in, splitting
		 * the read at the physical end of the buffer. */
777 		if (iobuf.in_fd >= 0 && FD_ISSET(iobuf.in_fd, &r_fds)) {
778 			size_t len, pos = iobuf.in.pos + iobuf.in.len;
780 			if (pos >= iobuf.in.size) {
781 				pos -= iobuf.in.size;
782 				len = iobuf.in.size - iobuf.in.len;
784 				len = iobuf.in.size - pos;
785 			if ((n = read(iobuf.in_fd, iobuf.in.buf + pos, len)) <= 0) {
787 					/* Signal that input has become invalid. */
788 					if (!read_batch || batch_fd < 0 || am_generator)
793 				if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN)
796 				/* Don't write errors on a dead socket. */
797 				if (iobuf.in_fd == sock_f_in) {
800 					rsyserr(FERROR_SOCKET, errno, "read error");
802 					rsyserr(FERROR, errno, "read error");
803 				exit_cleanup(RERR_SOCKETIO);
806 			if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) {
807 				rprintf(FINFO, "[%s] recv=%" SIZE_T_FMT_MOD "d\n",
808 					who_am_i(), (SIZE_T_FMT_CAST)n);
812 				last_io_in = time(NULL);
813 				if (io_timeout && flags & PIO_NEED_INPUT)
814 					maybe_send_keepalive(last_io_in, 0);
816 			stats.total_read += n;
		/* Honor --stop-at / --stop-after. */
821 		if (stop_at_utime && time(NULL) >= stop_at_utime) {
822 			rprintf(FERROR, "stopping at requested limit\n");
823 			exit_cleanup(RERR_TIMEOUT);
		/* Writable output: flush from whichever buffer ("out" points at
		 * iobuf.out or iobuf.msg; assignment not visible in this extract),
		 * capped by bwlimit and by the physical end of the buffer. */
826 		if (out && FD_ISSET(iobuf.out_fd, &w_fds)) {
827 			size_t len = iobuf.raw_flushing_ends_before ? iobuf.raw_flushing_ends_before - out->pos : out->len;
830 			if (bwlimit_writemax && len > bwlimit_writemax)
831 				len = bwlimit_writemax;
833 			if (out->pos + len > out->size)
834 				len = out->size - out->pos;
835 			if ((n = write(iobuf.out_fd, out->buf + out->pos, len)) <= 0) {
836 				if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN)
839 				/* Don't write errors on a dead socket. */
842 				iobuf.out.len = iobuf.msg.len = iobuf.raw_flushing_ends_before = 0;
843 				rsyserr(FERROR_SOCKET, errno, "write error");
844 				drain_multiplex_messages();
845 				exit_cleanup(RERR_SOCKETIO);
848 			if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) {
849 				rprintf(FINFO, "[%s] %s sent=%" SIZE_T_FMT_MOD "d\n",
850 					who_am_i(), out == &iobuf.out ? "out" : "msg", (SIZE_T_FMT_CAST)n);
854 				last_io_out = time(NULL);
855 			stats.total_written += n;
857 			if (bwlimit_writemax)
858 				sleep_for_bwlimit(n);
			/* Advance .pos/.len; restore a reduced buffer size when the
			 * position wraps to the start, and clear the raw-flush marker
			 * when we reach it. */
860 			if ((out->pos += n) == out->size) {
861 				if (iobuf.raw_flushing_ends_before)
862 					iobuf.raw_flushing_ends_before -= out->size;
864 				restore_iobuf_size(out);
865 			} else if (out->pos == iobuf.raw_flushing_ends_before)
866 				iobuf.raw_flushing_ends_before = 0;
867 			if ((out->len -= n) == empty_buf_len) {
869 				restore_iobuf_size(out);
871 				iobuf.raw_data_header_pos = 0;
875 			if (got_kill_signal > 0)
876 				handle_kill_signal(True);
878 			/* We need to help prevent deadlock by doing what reading
879 			 * we can whenever we are here trying to write. */
880 			if (IN_MULTIPLEXED_AND_READY && !(flags & PIO_NEED_INPUT)) {
881 				while (!iobuf.raw_input_ends_before && iobuf.in.len > 512)
883 				if (flist_receiving_enabled && iobuf.in.len > 512)
884 					wait_for_receiver(); /* generator only */
887 		if (ff_forward_fd >= 0 && FD_ISSET(ff_forward_fd, &r_fds)) {
888 			/* This can potentially flush all output and enable
889 			 * multiplexed output, so keep this last in the loop
890 			 * and be sure to not cache anything that would break
892 			forward_filesfrom_data();
897 	if (got_kill_signal > 0)
898 		handle_kill_signal(True);
	/* Return a pointer to the needed bytes; when PIO_CONSUME_INPUT is set,
	 * also advance the input buffer past them (handling wrap-around). */
900 	data = iobuf.in.buf + iobuf.in.pos;
902 	if (flags & PIO_CONSUME_INPUT) {
903 		iobuf.in.len -= needed;
904 		iobuf.in.pos += needed;
905 		if (iobuf.in.pos == iobuf.raw_input_ends_before)
906 			iobuf.raw_input_ends_before = 0;
907 		if (iobuf.in.pos >= iobuf.in.size) {
908 			iobuf.in.pos -= iobuf.in.size;
909 			if (iobuf.raw_input_ends_before)
910 				iobuf.raw_input_ends_before -= iobuf.in.size;
/* Copy "len" raw bytes out of the circular input buffer into "buf",
 * performing the read (and consume) via perform_io().  If the consumed span
 * wrapped past the end of iobuf.in, the copy is done in two pieces. */
917 static void raw_read_buf(char *buf, size_t len)
919 	size_t pos = iobuf.in.pos;
920 	char *data = perform_io(len, PIO_INPUT_AND_CONSUME);
	/* pos didn't advance => the span wrapped around the buffer end. */
921 	if (iobuf.in.pos <= pos && len) {
922 		size_t siz = len - iobuf.in.pos;
923 		memcpy(buf, data, siz);
924 		memcpy(buf + siz, iobuf.in.buf, iobuf.in.pos);
926 		memcpy(buf, data, len);
/* Read a 4-byte little-endian int (via IVAL) from the raw input stream.
 * Reads in place when the 4 bytes don't wrap; otherwise stages them through
 * a local buf via raw_read_buf(). */
929 static int32 raw_read_int(void)
932 	if (iobuf.in.size - iobuf.in.pos >= 4)
933 		data = perform_io(4, PIO_INPUT_AND_CONSUME);
935 		raw_read_buf(data = buf, 4);
936 	return IVAL(data, 0);
/* Used at the end of a transfer: keep reading (and discarding) input until
 * the generator's kill signal terminates us, with kluge_around_eof == 2 so
 * an EOF is tolerated.  Bails out early if buffered I/O isn't active. */
939 void noop_io_until_death(void)
943 	if (!iobuf.in.buf || !iobuf.out.buf || iobuf.in_fd < 0 || iobuf.out_fd < 0 || kluge_around_eof)
946 	/* If we're talking to a daemon over a socket, don't short-circuit this logic */
947 	if (msgs2stderr && daemon_connection >= 0)
950 	kluge_around_eof = 2;
951 	/* Setting an I/O timeout ensures that if something inexplicably weird
952 	 * happens, we won't hang around forever. */
957 		read_buf(iobuf.in_fd, buf, sizeof buf);
960 /* Buffer a message for the multiplexed output stream. Is not used for (normal) MSG_DATA. */
/* Appends a 4-byte multiplex header (MPLEX_BASE + code in the top byte, len
 * in the low 3 bytes) plus the payload into the circular iobuf.msg, with
 * optional iconv conversion of the payload.  Returns via paths not fully
 * visible here.  NOTE(review): line-sampled -- several interior lines
 * (declarations, some branches, the return) are missing from this extract. */
961 int send_msg(enum msgcode code, const char *buf, size_t len, int convert)
965 	BOOL want_debug = DEBUG_GTE(IO, 1) && convert >= 0 && (msgs2stderr == 1 || code != MSG_INFO);
	/* Without multiplexing the message can't be packaged (fallback path
	 * not visible in this extract). */
967 	if (!OUT_MULTIPLEXED)
971 		rprintf(FINFO, "[%s] send_msg(%d, %" SIZE_T_FMT_MOD "d)\n",
972 			who_am_i(), (int)code, (SIZE_T_FMT_CAST)len);
975 	/* When checking for enough free space for this message, we need to
976 	 * make sure that there is space for the 4-byte header, plus we'll
977 	 * assume that we may waste up to 3 bytes (if the header doesn't fit
978 	 * at the physical end of the buffer). */
980 	if (convert > 0 && ic_send == (iconv_t)-1)
983 		/* Ensuring double-size room leaves space for maximal conversion expansion. */
984 		needed = len*2 + 4 + 3;
987 		needed = len + 4 + 3;
988 	if (iobuf.msg.len + needed > iobuf.msg.size) {
990 			perform_io(needed, PIO_NEED_MSGROOM);
991 		else { /* We sometimes allow the iobuf.msg size to increase to avoid a deadlock. */
992 			size_t old_size = iobuf.msg.size;
993 			restore_iobuf_size(&iobuf.msg);
994 			realloc_xbuf(&iobuf.msg, iobuf.msg.size * 2);
995 			if (iobuf.msg.pos + iobuf.msg.len > old_size)
996 				memcpy(iobuf.msg.buf + old_size, iobuf.msg.buf, iobuf.msg.pos + iobuf.msg.len - old_size);
1000 	pos = iobuf.msg.pos + iobuf.msg.len; /* Must be set after any flushing. */
1001 	if (pos >= iobuf.msg.size)
1002 		pos -= iobuf.msg.size;
1003 	else if (pos + 4 > iobuf.msg.size) {
1004 		/* The 4-byte header won't fit at the end of the buffer,
1005 		 * so we'll temporarily reduce the message buffer's size
1006 		 * and put the header at the start of the buffer. */
1007 		reduce_iobuf_size(&iobuf.msg, pos);
1010 	hdr = iobuf.msg.buf + pos;
1012 	iobuf.msg.len += 4; /* Allocate room for the coming header bytes. */
	/* Conversion path: iconv the payload straight into iobuf.msg and
	 * recompute len as the converted byte count. */
1018 		INIT_XBUF(inbuf, (char*)buf, len, (size_t)-1);
1020 		len = iobuf.msg.len;
1021 		iconvbufs(ic_send, &inbuf, &iobuf.msg,
1022 			  ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT | ICB_INIT);
1023 		if (inbuf.len > 0) {
1024 			rprintf(FERROR, "overflowed iobuf.msg buffer in send_msg");
1025 			exit_cleanup(RERR_UNSUPPORTED);
1027 		len = iobuf.msg.len - len;
	/* Non-conversion path: copy the payload after the header slot. */
1033 		if ((pos += 4) == iobuf.msg.size)
1036 		/* Handle a split copy if we wrap around the end of the circular buffer. */
1037 		if (pos >= iobuf.msg.pos && (siz = iobuf.msg.size - pos) < len) {
1038 			memcpy(iobuf.msg.buf + pos, buf, siz);
1039 			memcpy(iobuf.msg.buf, buf + siz, len - siz);
1041 			memcpy(iobuf.msg.buf + pos, buf, len);
1043 		iobuf.msg.len += len;
	/* Now that the final len is known, stamp the 4-byte header in place. */
1046 	SIVAL(hdr, 0, ((MPLEX_BASE + (int)code)<<24) + len);
1048 	if (want_debug && convert > 0) {
1049 		rprintf(FINFO, "[%s] converted msg len=%" SIZE_T_FMT_MOD "d\n",
1050 			who_am_i(), (SIZE_T_FMT_CAST)len);
/* Convenience wrapper: send a single 32-bit value (little-endian via SIVAL)
 * as a multiplexed message of the given code. */
1056 void send_msg_int(enum msgcode code, int num)
1060 	if (DEBUG_GTE(IO, 1))
1061 		rprintf(FINFO, "[%s] send_msg_int(%d, %d)\n", who_am_i(), (int)code, num);
1063 	SIVAL(numbuf, 0, num);
1064 	send_msg(code, numbuf, 4, -1);
/* Generator-side bookkeeping when the receiver reports the status of a file
 * transfer (success / needs redo / not sent): update active counts, forward
 * MSG_SUCCESS for --remove-source-files, queue hard-link work, or push the
 * index onto the redo list.  NOTE(review): line-sampled -- the switch
 * statement framing and several closing braces are missing from this view. */
1067 static void got_flist_entry_status(enum festatus status, int ndx)
1069 	struct file_list *flist = flist_for_ndx(ndx, "got_flist_entry_status");
1071 	if (remove_source_files) {
1073 		active_bytecnt -= F_LENGTH(flist->files[ndx - flist->ndx_start]);
1077 		flist->in_progress--;
1081 		if (remove_source_files)
1082 			send_msg_int(MSG_SUCCESS, ndx);
1085 #ifdef SUPPORT_HARD_LINKS
1086 		if (preserve_hard_links) {
1087 			struct file_struct *file = flist->files[ndx - flist->ndx_start];
1088 			if (F_IS_HLINKED(file)) {
1089 				if (status == FES_NO_SEND)
1090 					flist_ndx_push(&hlink_list, -2); /* indicates a failure follows */
1091 				flist_ndx_push(&hlink_list, ndx);
1093 				flist->in_progress++;
1101 		flist->in_progress++;
1106 		flist_ndx_push(&redo_list, ndx);
1111 /* Note the fds used for the main socket (which might really be a pipe
1112  * for a local transfer, but we can ignore that). */
/* NOTE(review): the body (presumably assigning sock_f_in/sock_f_out) is
 * entirely missing from this sampled extract. */
1113 void io_set_sock_fds(int f_in, int f_out)
/* Record the --timeout value and derive the select() timeout from it:
 * roughly half the I/O timeout (the "allowed lull"), capped at the default
 * SELECT_TIMEOUT of 60 seconds. */
1119 void set_io_timeout(int secs)
1122 	allowed_lull = (io_timeout + 1) / 2;
1124 	if (!io_timeout || allowed_lull > SELECT_TIMEOUT)
1125 		select_timeout = SELECT_TIMEOUT;
1127 		select_timeout = allowed_lull;
/* If a remote-error message looks like an rsync <= 2.6.3 complaining about
 * an unknown short option whose letters are all valid 2.6.3 options, suggest
 * trying "--old-d" (2.6.3 had no -d). */
1133 static void check_for_d_option_error(const char *msg)
1135 	static char rsync263_opts[] = "BCDHIKLPRSTWabceghlnopqrtuvxz";
	/* Only consider messages of the form REMOTE_OPTION_ERROR...':'... */
1140 	 || strncmp(msg, REMOTE_OPTION_ERROR, sizeof REMOTE_OPTION_ERROR - 1) != 0)
1143 	msg += sizeof REMOTE_OPTION_ERROR - 1;
1144 	if (*msg == '-' || (colon = strchr(msg, ':')) == NULL
1145 	 || strncmp(colon, REMOTE_OPTION_ERROR2, sizeof REMOTE_OPTION_ERROR2 - 1) != 0)
	/* Every option letter before the colon must be known to 2.6.3. */
1148 	for ( ; *msg != ':'; msg++) {
1151 		else if (*msg == 'e')
1153 		else if (strchr(rsync263_opts, *msg) == NULL)
1158 		rprintf(FWARNING, "*** Try using \"--old-d\" if remote rsync is <= 2.6.3 ***\n");
1162 /* This is used by the generator to limit how many file transfers can
1163  * be active at once when --remove-source-files is specified. Without
1164  * this, sender-side deletions were mostly happening at the end. */
/* Blocks (processing finished files / waiting on the receiver) until the
 * active-transfer count drops below a byte-count-dependent limit, then
 * accounts the new file's length into active_bytecnt. */
1165 void increment_active_files(int ndx, int itemizing, enum logcode code)
1168 	/* TODO: tune these limits? */
	/* Allow fewer simultaneous files once >= 128 KiB is in flight. */
1169 	int limit = active_bytecnt >= 128*1024 ? 10 : 50;
1170 	if (active_filecnt < limit)
1172 		check_for_finished_files(itemizing, code, 0);
1173 		if (active_filecnt < limit)
1175 		wait_for_receiver();
1179 	active_bytecnt += F_LENGTH(cur_flist->files[ndx - cur_flist->ndx_start]);
/* Pop the next file index awaiting a redo (-1 when the list is empty,
 * per flist_ndx_pop semantics elsewhere in rsync). */
1182 int get_redo_num(void)
1184 	return flist_ndx_pop(&redo_list);
/* Pop the next file index queued for hard-link finishing (see the pushes
 * in got_flist_entry_status). */
1187 int get_hlink_num(void)
1189 	return flist_ndx_pop(&hlink_list);
1192 /* When we're the receiver and we have a local --files-from list of names
1193  * that needs to be sent over the socket to the sender, we have to do two
1194  * things at the same time: send the sender a list of what files we're
1195  * processing and read the incoming file+info list from the sender. We do
1196  * this by making recv_file_list() call forward_filesfrom_data(), which
1197  * will ensure that we forward data to the sender until we get some data
1198  * for recv_file_list() to use. */
/* Arms forwarding: records the fd (assignment to ff_forward_fd is among the
 * lines missing from this sampled extract) and allocates the staging buffer. */
1199 void start_filesfrom_forwarding(int fd)
1201 	if (protocol_version < 31 && OUT_MULTIPLEXED) {
1202 		/* Older protocols send the files-from data w/o packaging
1203 		 * it in multiplexed I/O packets, so temporarily switch
1204 		 * to buffered I/O to match this behavior. */
1205 		iobuf.msg.pos = iobuf.msg.len = 0; /* Be extra sure no messages go out. */
1206 		ff_reenable_multiplex = io_end_multiplex_out(MPLX_TO_BUFFERED);
1210 	alloc_xbuf(&ff_xb, FILESFROM_BUFLEN);
1213 /* Read a line into the "buf" buffer.
 * Flags: RL_EOL_NULLS treats '\0' as the line terminator (else CR/LF);
 * RL_DUMP_COMMENTS skips blank lines and '#'/';' comment lines;
 * RL_CONVERT runs the bytes through the ic_recv iconv conversion.
 * NOTE(review): the return statements and loop framing are elided in
 * this excerpt. */
1214 int read_line(int fd, char *buf, size_t bufsiz, int flags)
/* Make sure the iconv staging buffer can hold a full line. */
1219 if (flags & RL_CONVERT && iconv_buf.size < bufsiz)
1220 realloc_xbuf(&iconv_buf, ROUND_UP_1024(bufsiz) + 1024);
/* When converting, accumulate raw bytes in iconv_buf first. */
1225 s = flags & RL_CONVERT ? iconv_buf.buf : buf;
1229 eob = s + bufsiz - 1;
1231 /* We avoid read_byte() for files because files can return an EOF. */
1232 if (fd == iobuf.in_fd)
1234 else if (safe_read(fd, &ch, 1) == 0)
1236 if (flags & RL_EOL_NULLS ? ch == '\0' : (ch == '\r' || ch == '\n')) {
1237 /* Skip empty lines if dumping comments. */
1238 if (flags & RL_DUMP_COMMENTS && s == buf)
1247 if (flags & RL_DUMP_COMMENTS && (*buf == '#' || *buf == ';'))
1251 if (flags & RL_CONVERT) {
1253 INIT_XBUF(outbuf, buf, 0, bufsiz);
1255 iconv_buf.len = s - iconv_buf.buf;
1256 iconvbufs(ic_recv, &iconv_buf, &outbuf,
1257 ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_INIT);
/* Null-terminate the converted result in the caller's buffer. */
1258 outbuf.buf[outbuf.len] = '\0';
/* Read newline- (or null-) terminated argument strings from f_in into a
 * freshly allocated argv array, optionally recording the raw request text
 * in *request_p and glob-expanding each arg. An empty line (read_line()
 * returning 0) ends the list.
 * NOTE(review): loop framing and some branches are elided in this excerpt. */
1266 void read_args(int f_in, char *mod_name, char *buf, size_t bufsiz, int rl_nulls,
1267 char ***argv_p, int *argc_p, char **request_p)
1269 int maxargs = MAX_ARGS;
1270 int dot_pos = 0, argc = 0, request_len = 0;
1272 int rl_flags = (rl_nulls ? RL_EOL_NULLS : 0);
/* Convert incoming args via iconv when protect_args is active. */
1275 rl_flags |= (protect_args && ic_recv != (iconv_t)-1 ? RL_CONVERT : 0);
1278 argv = new_array(char *, maxargs);
1279 if (mod_name && !protect_args)
1280 argv[argc++] = "rsyncd";
1286 if (read_line(f_in, buf, bufsiz, rl_flags) == 0)
/* Grow argv in MAX_ARGS-sized steps as needed. */
1289 if (argc == maxargs-1) {
1290 maxargs += MAX_ARGS;
1291 argv = realloc_array(argv, char *, maxargs);
/* Append this arg to the recorded request string (capped near 1KB). */
1295 if (request_p && request_len < 1024) {
1296 int len = strlen(buf);
1298 request_p[0][request_len++] = ' ';
1299 *request_p = realloc_array(*request_p, char, request_len + len + 1);
1300 memcpy(*request_p + request_len, buf, len + 1);
1304 glob_expand_module(mod_name, buf, &argv, &argc, &maxargs);
1306 glob_expand(buf, &argv, &argc, &maxargs);
/* Note the position of a lone "." arg (daemon module handling). */
1310 if (*p == '.' && p[1] == '\0')
/* Tell the glob code to release its internal state. */
1316 glob_expand(NULL, NULL, NULL, NULL);
/* Enable buffered output on f_out, allocating iobuf.out on first use.
 * If a buffer already exists it must be bound to the same fd. */
1322 BOOL io_start_buffering_out(int f_out)
1324 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
1325 rprintf(FINFO, "[%s] io_start_buffering_out(%d)\n", who_am_i(), f_out);
1327 if (iobuf.out.buf) {
1328 if (iobuf.out_fd == -1)
1329 iobuf.out_fd = f_out;
1331 assert(f_out == iobuf.out_fd);
/* Output buffer is double-sized relative to the input buffer. */
1335 alloc_xbuf(&iobuf.out, ROUND_UP_1024(IO_BUFFER_SIZE * 2));
1336 iobuf.out_fd = f_out;
/* Enable buffered input on f_in, allocating iobuf.in on first use.
 * If a buffer already exists it must be bound to the same fd. */
1341 BOOL io_start_buffering_in(int f_in)
1343 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
1344 rprintf(FINFO, "[%s] io_start_buffering_in(%d)\n", who_am_i(), f_in);
1347 if (iobuf.in_fd == -1)
1350 assert(f_in == iobuf.in_fd);
1354 alloc_xbuf(&iobuf.in, ROUND_UP_1024(IO_BUFFER_SIZE));
/* Stop input buffering: free iobuf.in when free_buffers is set, else
 * just reset its position/length so the buffer can be reused. */
1360 void io_end_buffering_in(BOOL free_buffers)
1362 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) {
1363 rprintf(FINFO, "[%s] io_end_buffering_in(IOBUF_%s_BUFS)\n",
1364 who_am_i(), free_buffers ? "FREE" : "KEEP");
1368 free_xbuf(&iobuf.in);
1370 iobuf.in.pos = iobuf.in.len = 0;
/* Stop output buffering: flush all pending data first, then free the
 * out and msg buffers when free_buffers is set. */
1375 void io_end_buffering_out(BOOL free_buffers)
1377 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) {
1378 rprintf(FINFO, "[%s] io_end_buffering_out(IOBUF_%s_BUFS)\n",
1379 who_am_i(), free_buffers ? "FREE" : "KEEP");
/* Everything must hit the wire before the buffers go away. */
1382 io_flush(FULL_FLUSH);
1385 free_xbuf(&iobuf.out);
1386 free_xbuf(&iobuf.msg);
/* Flush pending output after the flist is complete, either because the
 * caller marked it important or because >= 5 seconds have passed since
 * the last output. */
1392 void maybe_flush_socket(int important)
1394 if (flist_eof && iobuf.out.buf && iobuf.out.len > iobuf.out_empty_len
1395 && (important || time(NULL) - last_io_out >= 5))
1396 io_flush(NORMAL_FLUSH);
1399 /* Older rsync versions used to send either a MSG_NOOP (protocol 30) or a
1400 * raw-data-based keep-alive (protocol 29), both of which implied forwarding of
1401 * the message through the sender. Since the new timeout method does not need
1402 * any forwarding, we just send an empty MSG_DATA message, which works with all
1403 * rsync versions. This avoids any message forwarding, and leaves the raw-data
1404 * stream alone (since we can never be quite sure if that stream is in the
1405 * right state for a keep-alive message). */
1406 void maybe_send_keepalive(time_t now, int flags)
1408 if (flags & MSK_ACTIVE_RECEIVER)
1409 last_io_in = now; /* Fudge things when we're working hard on the files. */
1411 /* Early in the transfer (before the receiver forks) the receiving side doesn't
1412 * care if it hasn't sent data in a while as long as it is receiving data (in
1413 * fact, a pre-3.1.0 rsync would die if we tried to send it a keep alive during
1414 * this time). So, if we're an early-receiving proc, just return and let the
1415 * incoming data determine if we timeout. */
1416 if (!am_sender && !am_receiver && !am_generator)
1419 if (now - last_io_out >= allowed_lull) {
1420 /* The receiver is special: it only sends keep-alive messages if it is
1421 * actively receiving data. Otherwise, it lets the generator timeout. */
1422 if (am_receiver && now - last_io_in >= io_timeout)
/* Only queue an empty MSG_DATA when both buffers are idle. */
1425 if (!iobuf.msg.len && iobuf.out.len == iobuf.out_empty_len)
1426 send_msg(MSG_DATA, "", 0, 0);
1427 if (!(flags & MSK_ALLOW_FLUSH)) {
1428 /* Let the caller worry about writing out the data. */
1429 } else if (iobuf.msg.len)
1430 perform_io(iobuf.msg.size - iobuf.msg.len + 1, PIO_NEED_MSGROOM);
1431 else if (iobuf.out.len > iobuf.out_empty_len)
1432 io_flush(NORMAL_FLUSH);
/* Begin forwarding file-list data: announce the dir index downstream and
 * set the flag that makes read_buf() echo incoming data to iobuf.out_fd. */
1436 void start_flist_forward(int ndx)
1438 write_int(iobuf.out_fd, ndx);
1439 forward_flist_data = 1;
/* Stop echoing file-list data downstream (see start_flist_forward). */
1442 void stop_flist_forward(void)
1444 forward_flist_data = 0;
1447 /* Read a message from a multiplexed source.
 * Decodes one MPLEX header (24-bit length + tag byte), then dispatches on
 * the tag. iobuf.in_multiplexed is held at -1 while this message's payload
 * is still being consumed, and restored to 1 once it is safe for
 * perform_io() to read further messages.
 * NOTE(review): many case labels and framing lines are elided in this
 * excerpt; per-tag comments below are hedged accordingly. */
1448 static void read_a_msg(void)
1450 char data[BIGPATHBUFLEN];
1454 /* This ensures that perform_io() does not try to do any message reading
1455 * until we've read all of the data for this message. We should also
1456 * try to avoid calling things that will cause data to be written via
1457 * perform_io() prior to this being reset to 1. */
1458 iobuf.in_multiplexed = -1;
1460 tag = raw_read_int();
/* Header layout: low 24 bits = payload length, high byte = tag. */
1462 msg_bytes = tag & 0xFFFFFF;
1463 tag = (tag >> 24) - MPLEX_BASE;
1465 if (msgs2stderr == 1 && DEBUG_GTE(IO, 1)) {
1466 rprintf(FINFO, "[%s] got msg=%d, len=%" SIZE_T_FMT_MOD "d\n",
1467 who_am_i(), (int)tag, (SIZE_T_FMT_CAST)msg_bytes);
/* Presumably the MSG_DATA case: mark where raw data ends in the input. */
1472 assert(iobuf.raw_input_ends_before == 0);
1473 /* Though this does not yet read the data, we do mark where in
1474 * the buffer the msg data will end once it is read. It is
1475 * possible that this points off the end of the buffer, in
1476 * which case the gradual reading of the input stream will
1477 * cause this value to wrap around and eventually become real. */
1479 iobuf.raw_input_ends_before = iobuf.in.pos + msg_bytes;
1480 iobuf.in_multiplexed = 1;
/* Stats message: only valid for the generator with an exact-size payload. */
1483 if (msg_bytes != sizeof stats.total_read || !am_generator)
1485 raw_read_buf((char*)&stats.total_read, sizeof stats.total_read);
1486 iobuf.in_multiplexed = 1;
/* Redo request: a 4-byte file index for the generator. */
1489 if (msg_bytes != 4 || !am_generator)
1491 val = raw_read_int();
1492 iobuf.in_multiplexed = 1;
1493 got_flist_entry_status(FES_REDO, val);
/* IO-error value: read and relay onward (case label elided). */
1498 val = raw_read_int();
1499 iobuf.in_multiplexed = 1;
1502 send_msg_int(MSG_IO_ERROR, val);
1504 case MSG_IO_TIMEOUT:
1505 if (msg_bytes != 4 || am_server || am_generator)
1507 val = raw_read_int();
1508 iobuf.in_multiplexed = 1;
/* Adopt the server's (smaller) timeout value. */
1509 if (!io_timeout || io_timeout > val) {
1510 if (INFO_GTE(MISC, 2))
1511 rprintf(FINFO, "Setting --timeout=%d to match server\n", val);
1512 set_io_timeout(val);
1516 /* Support protocol-30 keep-alive method. */
1519 iobuf.in_multiplexed = 1;
1521 maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
/* Deleted-name message: bounds-check, read, and forward (iconv path below). */
1524 if (msg_bytes >= sizeof data)
1527 raw_read_buf(data, msg_bytes);
1528 iobuf.in_multiplexed = 1;
1529 send_msg(MSG_DELETED, data, msg_bytes, 1);
/* Charset-converting variant of the deleted-name forwarding. */
1533 if (ic_recv != (iconv_t)-1) {
1537 int flags = ICB_INCLUDE_BAD | ICB_INIT;
1539 INIT_CONST_XBUF(outbuf, data);
1540 INIT_XBUF(inbuf, ibuf, 0, (size_t)-1);
1543 size_t len = msg_bytes > sizeof ibuf - inbuf.len ? sizeof ibuf - inbuf.len : msg_bytes;
1544 raw_read_buf(ibuf + inbuf.len, len);
/* Strip a trailing NUL before converting; re-add it afterwards. */
1547 if (!(msg_bytes -= len) && !ibuf[inbuf.len-1])
1548 inbuf.len--, add_null = 1;
1549 if (iconvbufs(ic_send, &inbuf, &outbuf, flags) < 0) {
1552 /* Buffer ended with an incomplete char, so move the
1553 * bytes to the start of the buffer and continue. */
1554 memmove(ibuf, ibuf + inbuf.pos, inbuf.len);
1559 if (outbuf.len == outbuf.size)
1561 outbuf.buf[outbuf.len++] = '\0';
1563 msg_bytes = outbuf.len;
1566 raw_read_buf(data, msg_bytes);
1567 iobuf.in_multiplexed = 1;
1568 /* A directory name was sent with the trailing null */
1569 if (msg_bytes > 0 && !data[msg_bytes-1])
1570 log_delete(data, S_IFDIR);
1572 data[msg_bytes] = '\0';
1573 log_delete(data, S_IFREG);
/* Success/no-send status messages must carry exactly 4 bytes. */
1577 if (msg_bytes != 4) {
1579 rprintf(FERROR, "invalid multi-message %d:%lu [%s%s]\n",
1580 tag, (unsigned long)msg_bytes, who_am_i(),
1581 inc_recurse ? "/inc" : "");
1582 exit_cleanup(RERR_STREAMIO);
1584 val = raw_read_int();
1585 iobuf.in_multiplexed = 1;
1587 got_flist_entry_status(FES_SUCCESS, val);
1589 successful_send(val);
1594 val = raw_read_int();
1595 iobuf.in_multiplexed = 1;
1597 got_flist_entry_status(FES_NO_SEND, val);
1599 send_msg_int(MSG_NO_SEND, val);
1601 case MSG_ERROR_SOCKET:
1602 case MSG_ERROR_UTF8:
1607 if (tag == MSG_ERROR_SOCKET)
1612 case MSG_ERROR_XFER:
1614 if (msg_bytes >= sizeof data) {
1617 "multiplexing overflow %d:%lu [%s%s]\n",
1618 tag, (unsigned long)msg_bytes, who_am_i(),
1619 inc_recurse ? "/inc" : "");
1620 exit_cleanup(RERR_STREAMIO);
1622 raw_read_buf(data, msg_bytes);
1623 /* We don't set in_multiplexed value back to 1 before writing this message
1624 * because the write might loop back and read yet another message, over and
1625 * over again, while waiting for room to put the message in the msg buffer. */
1626 rwrite((enum logcode)tag, data, msg_bytes, !am_generator);
1627 iobuf.in_multiplexed = 1;
1628 if (first_message) {
/* First message on a list-only client may be an old-rsync option error. */
1629 if (list_only && !am_sender && tag == 1 && msg_bytes < sizeof data) {
1630 data[msg_bytes] = '\0';
1631 check_for_d_option_error(data);
1636 case MSG_ERROR_EXIT:
1638 val = raw_read_int();
1639 else if (msg_bytes == 0)
1643 iobuf.in_multiplexed = 1;
1644 if (DEBUG_GTE(EXIT, 3)) {
1645 rprintf(FINFO, "[%s] got MSG_ERROR_EXIT with %" SIZE_T_FMT_MOD "d bytes\n",
1646 who_am_i(), (SIZE_T_FMT_CAST)msg_bytes);
/* A 0-length MSG_ERROR_EXIT is an exit handshake; relay it as needed. */
1648 if (msg_bytes == 0) {
1649 if (!am_sender && !am_generator) {
1650 if (DEBUG_GTE(EXIT, 3)) {
1651 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
1654 send_msg(MSG_ERROR_EXIT, "", 0, 0);
1655 io_flush(FULL_FLUSH);
1657 } else if (protocol_version >= 31) {
1658 if (am_generator || am_receiver) {
1659 if (DEBUG_GTE(EXIT, 3)) {
1660 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT with exit_code %d\n",
1663 send_msg_int(MSG_ERROR_EXIT, val);
1665 if (DEBUG_GTE(EXIT, 3)) {
1666 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
1669 send_msg(MSG_ERROR_EXIT, "", 0, 0);
1672 /* Send a negative linenum so that we don't end up
1673 * with a duplicate exit message. */
1674 _exit_cleanup(val, __FILE__, 0 - __LINE__);
1676 rprintf(FERROR, "unexpected tag %d [%s%s]\n",
1677 tag, who_am_i(), inc_recurse ? "/inc" : "");
1678 exit_cleanup(RERR_STREAMIO);
1681 assert(iobuf.in_multiplexed > 0);
/* Consume any messages still sitting in iobuf.in, discarding pending raw
 * data so each queued message can be processed before shutdown.
 * NOTE(review): the per-iteration read_a_msg() call is elided here. */
1684 static void drain_multiplex_messages(void)
1686 while (IN_MULTIPLEXED_AND_READY && iobuf.in.len) {
1687 if (iobuf.raw_input_ends_before) {
/* Skip over the unread raw data of the current message. */
1688 size_t raw_len = iobuf.raw_input_ends_before - iobuf.in.pos;
1689 iobuf.raw_input_ends_before = 0;
1690 if (raw_len >= iobuf.in.len) {
1694 iobuf.in.len -= raw_len;
/* Wrap the circular-buffer position when it runs past the end. */
1695 if ((iobuf.in.pos += raw_len) >= iobuf.in.size)
1696 iobuf.in.pos -= iobuf.in.size;
/* Generator-side wait: block until the receiver sends something, then
 * handle raw-data input -- either an NDX value (done / flist request) or
 * an incremental file list to receive.
 * NOTE(review): the perform_io() wait and several branches are elided in
 * this excerpt. */
1702 void wait_for_receiver(void)
1704 if (!iobuf.raw_input_ends_before)
1707 if (iobuf.raw_input_ends_before) {
1708 int ndx = read_int(iobuf.in_fd);
1713 if (DEBUG_GTE(FLIST, 3))
1714 rprintf(FINFO, "[%s] flist_eof=1\n", who_am_i());
1720 exit_cleanup(RERR_STREAMIO);
/* Negative-ndx path (presumably): receive an incremental flist. */
1723 struct file_list *flist;
1724 flist_receiving_enabled = False;
1725 if (DEBUG_GTE(FLIST, 2)) {
1726 rprintf(FINFO, "[%s] receiving flist for dir %d\n",
1729 flist = recv_file_list(iobuf.in_fd, ndx);
1730 flist->parent_ndx = ndx;
1731 #ifdef SUPPORT_HARD_LINKS
1732 if (preserve_hard_links)
1733 match_hard_links(flist);
1735 flist_receiving_enabled = True;
/* Read a 2-byte little-endian unsigned value from f.
 * NOTE(review): the read_buf() call filling b[] is elided here. */
1740 unsigned short read_shortint(int f)
1744 return (UVAL(b, 1) << 8) + UVAL(b, 0);
/* Read a 4-byte little-endian int32 from f, sign-extending when the
 * native int32 type is wider than 4 bytes. */
1747 int32 read_int(int f)
1754 #if SIZEOF_INT32 > 4
1755 if (num & (int32)0x80000000)
1756 num |= ~(int32)0xffffffff;
/* Read a variable-length int32: a lead byte whose high bits encode how
 * many extra bytes follow, with the remaining lead-byte bits holding the
 * value's top bits. */
1761 int32 read_varint(int f)
/* Table lookup: number of extra bytes implied by the lead byte. */
1772 extra = int_byte_extra[ch / 4];
1774 uchar bit = ((uchar)1<<(8-extra));
1775 if (extra >= (int)sizeof u.b) {
1776 rprintf(FERROR, "Overflow in read_varint()\n");
1777 exit_cleanup(RERR_STREAMIO);
1779 read_buf(f, u.b, extra);
/* Mask the length bits off the lead byte to recover the value bits. */
1780 u.b[extra] = ch & (bit-1);
1783 #if CAREFUL_ALIGNMENT
1786 #if SIZEOF_INT32 > 4
1787 if (u.x & (int32)0x80000000)
1788 u.x |= ~(int32)0xffffffff;
/* Read a variable-length int64 whose encoding starts with min_bytes
 * bytes; the first byte's high bits say how many extra bytes follow.
 * Errors out rather than silently truncating on 32-bit int64 builds. */
1793 int64 read_varlong(int f, uchar min_bytes)
1802 #if SIZEOF_INT64 < 8
1807 read_buf(f, b2, min_bytes);
1808 memcpy(u.b, b2+1, min_bytes-1);
1809 extra = int_byte_extra[CVAL(b2, 0) / 4];
1811 uchar bit = ((uchar)1<<(8-extra));
1812 if (min_bytes + extra > (int)sizeof u.b) {
1813 rprintf(FERROR, "Overflow in read_varlong()\n");
1814 exit_cleanup(RERR_STREAMIO);
1816 read_buf(f, u.b + min_bytes - 1, extra);
/* Mask the length bits off the lead byte to recover the value bits. */
1817 u.b[min_bytes + extra - 1] = CVAL(b2, 0) & (bit-1);
1818 #if SIZEOF_INT64 < 8
1819 if (min_bytes + extra > 5 || u.b[4] || CVAL(u.b,3) & 0x80) {
1820 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
1821 exit_cleanup(RERR_UNSUPPORTED);
1825 u.b[min_bytes + extra - 1] = CVAL(b2, 0);
1826 #if SIZEOF_INT64 < 8
1828 #elif CAREFUL_ALIGNMENT
1829 u.x = IVAL64(u.b,0);
/* Read the old-style long encoding: a 4-byte int, with 0xffffffff acting
 * as an escape meaning a full 8-byte value follows. */
1834 int64 read_longint(int f)
1836 #if SIZEOF_INT64 >= 8
1839 int32 num = read_int(f);
1841 if (num != (int32)0xffffffff)
1844 #if SIZEOF_INT64 < 8
1845 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
1846 exit_cleanup(RERR_UNSUPPORTED);
/* Assemble the 64-bit value from two little-endian 32-bit halves. */
1849 return IVAL(b,0) | (((int64)IVAL(b,4))<<32);
1853 /* Debugging note: this will be named read_buf_() when using an external zlib. */
/* Read len bytes from f into buf. For the multiplexed input fd this
 * consumes raw message data (reading new message headers as needed) and
 * optionally echoes the bytes downstream (flist forwarding) or into a
 * batch file. Other fds use a plain safe_read(). */
1854 void read_buf(int f, char *buf, size_t len)
1856 if (f != iobuf.in_fd) {
1857 if (safe_read(f, buf, len) != len)
1858 whine_about_eof(False); /* Doesn't return. */
1862 if (!IN_MULTIPLEXED) {
1863 raw_read_buf(buf, len);
1864 total_data_read += len;
1865 if (forward_flist_data)
1866 write_buf(iobuf.out_fd, buf, len);
1868 if (f == write_batch_monitor_in)
1869 safe_write(batch_fd, buf, len);
/* Multiplexed: wait until some raw data is available, then take up to
 * the end of the current message (and at most one buffer's worth). */
1876 while (!iobuf.raw_input_ends_before)
1879 siz = MIN(len, iobuf.raw_input_ends_before - iobuf.in.pos);
1880 if (siz >= iobuf.in.size)
1881 siz = iobuf.in.size;
1882 raw_read_buf(buf, siz);
1883 total_data_read += siz;
1885 if (forward_flist_data)
1886 write_buf(iobuf.out_fd, buf, siz);
1888 if (f == write_batch_monitor_in)
1889 safe_write(batch_fd, buf, siz);
1891 if ((len -= siz) == 0)
/* Read len bytes and (in the elided tail) null-terminate buf. */
1897 void read_sbuf(int f, char *buf, size_t len)
1899 read_buf(f, buf, len);
/* Read a single byte from f. */
1903 uchar read_byte(int f)
1906 read_buf(f, (char*)&c, 1);
/* Read a variable-length string: a 1-byte length, or 2 bytes when the
 * first byte's high bit is set. Rejects strings that won't fit. */
1910 int read_vstring(int f, char *buf, int bufsize)
1912 int len = read_byte(f);
1915 len = (len & ~0x80) * 0x100 + read_byte(f);
1917 if (len >= bufsize) {
1918 rprintf(FERROR, "over-long vstring received (%d > %d)\n",
1924 read_buf(f, buf, len);
1929 /* Populate a sum_struct with values from the socket. This is
1930 * called by both the sender and the receiver. Each field is
 * range-checked; invalid values terminate with RERR_PROTOCOL. */
1931 void read_sum_head(int f, struct sum_struct *sum)
1933 int32 max_blength = protocol_version < 30 ? OLD_MAX_BLOCK_SIZE : MAX_BLOCK_SIZE;
1934 sum->count = read_int(f);
1935 if (sum->count < 0) {
1936 rprintf(FERROR, "Invalid checksum count %ld [%s]\n",
1937 (long)sum->count, who_am_i());
1938 exit_cleanup(RERR_PROTOCOL);
1940 sum->blength = read_int(f);
1941 if (sum->blength < 0 || sum->blength > max_blength) {
1942 rprintf(FERROR, "Invalid block length %ld [%s]\n",
1943 (long)sum->blength, who_am_i());
1944 exit_cleanup(RERR_PROTOCOL);
/* Protocols < 27 use a fixed s2length instead of reading one. */
1946 sum->s2length = protocol_version < 27 ? csum_length : (int)read_int(f);
1947 if (sum->s2length < 0 || sum->s2length > MAX_DIGEST_LEN) {
1948 rprintf(FERROR, "Invalid checksum length %d [%s]\n",
1949 sum->s2length, who_am_i());
1950 exit_cleanup(RERR_PROTOCOL);
1952 sum->remainder = read_int(f);
1953 if (sum->remainder < 0 || sum->remainder > sum->blength) {
1954 rprintf(FERROR, "Invalid remainder length %ld [%s]\n",
1955 (long)sum->remainder, who_am_i());
1956 exit_cleanup(RERR_PROTOCOL);
1960 /* Send the values from a sum_struct over the socket. Set sum to
1961 * NULL if there are no checksums to send. This is called by both
1962 * the generator and the sender. */
1963 void write_sum_head(int f, struct sum_struct *sum)
/* Zero-filled stand-in used when sum is NULL (substitution elided). */
1965 static struct sum_struct null_sum;
1970 write_int(f, sum->count);
1971 write_int(f, sum->blength);
/* s2length is only transmitted for protocol >= 27 (see read_sum_head). */
1972 if (protocol_version >= 27)
1973 write_int(f, sum->s2length);
1974 write_int(f, sum->remainder);
1977 /* Sleep after writing to limit I/O bandwidth usage.
1979 * @todo Rather than sleeping after each write, it might be better to
1980 * use some kind of averaging. The current algorithm seems to always
1981 * use a bit less bandwidth than specified, because it doesn't make up
1982 * for slow periods. But arguably this is a feature. In addition, we
1983 * ought to take the time used to write the data into account.
1985 * During some phases of big transfers (file FOO is uptodate) this is
1986 * called with a small bytes_written every time. As the kernel has to
1987 * round small waits up to guarantee that we actually wait at least the
1988 * requested number of microseconds, this can become grossly inaccurate.
1989 * We therefore keep track of the bytes we've written over time and only
1990 * sleep when the accumulated delay is at least 1 tenth of a second. */
1991 static void sleep_for_bwlimit(int bytes_written)
1993 static struct timeval prior_tv;
1994 static long total_written = 0;
1995 struct timeval tv, start_tv;
1996 long elapsed_usec, sleep_usec;
1998 #define ONE_SEC 1000000L /* # of microseconds in a second */
2000 total_written += bytes_written;
2002 gettimeofday(&start_tv, NULL);
2003 if (prior_tv.tv_sec) {
2004 elapsed_usec = (start_tv.tv_sec - prior_tv.tv_sec) * ONE_SEC
2005 + (start_tv.tv_usec - prior_tv.tv_usec);
/* Credit the bytes "allowed" during the elapsed time (bwlimit is KB/s). */
2006 total_written -= (int64)elapsed_usec * bwlimit / (ONE_SEC/1024);
2007 if (total_written < 0)
2011 sleep_usec = total_written * (ONE_SEC/1024) / bwlimit;
/* Defer tiny sleeps; accumulate until at least 1/10 second is owed. */
2012 if (sleep_usec < ONE_SEC / 10) {
2013 prior_tv = start_tv;
2017 tv.tv_sec = sleep_usec / ONE_SEC;
2018 tv.tv_usec = sleep_usec % ONE_SEC;
/* select() with no fds is used as a portable sub-second sleep. */
2019 select(0, NULL, NULL, NULL, &tv);
2021 gettimeofday(&prior_tv, NULL);
/* Carry over any under-sleep as remaining byte debt. */
2022 elapsed_usec = (prior_tv.tv_sec - start_tv.tv_sec) * ONE_SEC
2023 + (prior_tv.tv_usec - start_tv.tv_usec);
2024 total_written = (sleep_usec - elapsed_usec) * bwlimit / (ONE_SEC/1024);
/* Flush buffered output. FULL_FLUSH empties iobuf.out entirely;
 * NORMAL_FLUSH frees at least one byte of room; otherwise (MSG_FLUSH)
 * only the msg buffer is flushed. */
2027 void io_flush(int flush_type)
2029 if (iobuf.out.len > iobuf.out_empty_len) {
2030 if (flush_type == FULL_FLUSH) /* flush everything in the output buffers */
2031 perform_io(iobuf.out.size - iobuf.out_empty_len, PIO_NEED_OUTROOM)
2032 else if (flush_type == NORMAL_FLUSH) /* flush at least 1 byte */
2033 perform_io(iobuf.out.size - iobuf.out.len + 1, PIO_NEED_OUTROOM);
2034 /* MSG_FLUSH: flush iobuf.msg only */
2037 perform_io(iobuf.msg.size, PIO_NEED_MSGROOM);
/* Write a 2-byte little-endian value (low byte written via elided line). */
2040 void write_shortint(int f, unsigned short x)
2044 b[1] = (char)(x >> 8);
/* Write a 4-byte little-endian int32 (body elided in this excerpt). */
2048 void write_int(int f, int32 x)
/* Write a variable-length int32: drop trailing zero bytes, then pack the
 * byte count into the high bits of the lead byte. */
2055 void write_varint(int f, int32 x)
/* Find the highest non-zero byte (at least one byte is always sent). */
2063 for (cnt = 4; cnt > 1 && b[cnt] == 0; cnt--) {}
2064 bit = ((uchar)1<<(7-cnt+1));
/* If the top byte collides with the length bits, emit an extra byte. */
2066 if (CVAL(b, cnt) >= bit) {
2070 *b = b[cnt] | ~(bit*2-1);
2074 write_buf(f, b, cnt);
/* Write a variable-length int64 with a guaranteed minimum of min_bytes
 * bytes; mirrors read_varlong(). Refuses values that need 64 bits when
 * int64 is only 32 bits wide. */
2077 void write_varlong(int f, int64 x, uchar min_bytes)
2083 #if SIZEOF_INT64 >= 8
2087 if (x <= 0x7FFFFFFF && x >= 0)
2088 memset(b + 5, 0, 4);
2090 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
2091 exit_cleanup(RERR_UNSUPPORTED);
/* Trim trailing zero bytes down to the protocol minimum. */
2095 while (cnt > min_bytes && b[cnt] == 0)
2097 bit = ((uchar)1<<(7-cnt+min_bytes));
2098 if (CVAL(b, cnt) >= bit) {
2101 } else if (cnt > min_bytes)
2102 *b = b[cnt] | ~(bit*2-1);
2106 write_buf(f, b, cnt);
2110 * Note: int64 may actually be a 32-bit type if ./configure couldn't find any
2111 * 64-bit types on this platform.
/* Old-style long encoding: small values go as a plain 4-byte int; larger
 * ones are escaped with 0xffffffff followed by 8 bytes (see read_longint). */
2113 void write_longint(int f, int64 x)
2115 char b[12], * const s = b+4;
2118 if (x <= 0x7FFFFFFF && x >= 0) {
2123 #if SIZEOF_INT64 < 8
2124 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
2125 exit_cleanup(RERR_UNSUPPORTED);
2128 SIVAL(s, 4, x >> 32);
2129 write_buf(f, b, 12);
/* Write a buffer that may exceed the output buffer's capacity by sending
 * it in half-buffer chunks, then the remainder. */
2133 void write_bigbuf(int f, const char *buf, size_t len)
2135 size_t half_max = (iobuf.out.size - iobuf.out_empty_len) / 2;
2137 while (len > half_max + 1024) {
2138 write_buf(f, buf, half_max);
2143 write_buf(f, buf, len);
/* Queue len bytes for output. Non-iobuf fds are written directly with
 * safe_write(); the iobuf fd appends into the circular out buffer,
 * flushing via perform_io() when there is not enough room. Data is also
 * copied to the batch file when batch monitoring is active. */
2146 void write_buf(int f, const char *buf, size_t len)
2150 if (f != iobuf.out_fd) {
2151 safe_write(f, buf, len);
2155 if (iobuf.out.len + len > iobuf.out.size)
2156 perform_io(len, PIO_NEED_OUTROOM);
2158 pos = iobuf.out.pos + iobuf.out.len; /* Must be set after any flushing. */
2159 if (pos >= iobuf.out.size)
2160 pos -= iobuf.out.size;
2162 /* Handle a split copy if we wrap around the end of the circular buffer. */
2163 if (pos >= iobuf.out.pos && (siz = iobuf.out.size - pos) < len) {
2164 memcpy(iobuf.out.buf + pos, buf, siz);
2165 memcpy(iobuf.out.buf, buf + siz, len - siz);
2167 memcpy(iobuf.out.buf + pos, buf, len);
2169 iobuf.out.len += len;
2170 total_data_written += len;
2173 if (f == write_batch_monitor_out)
2174 safe_write(batch_fd, buf, len);
2177 /* Write a string to the connection (no trailing NUL is sent). */
2178 void write_sbuf(int f, const char *buf)
2180 write_buf(f, buf, strlen(buf));
/* Write a single byte to the connection. */
2183 void write_byte(int f, uchar c)
2185 write_buf(f, (char *)&c, 1);
/* Write a variable-length string: 1 length byte, or 2 with the high bit
 * set on the first when len >= 0x80; mirrors read_vstring(). */
2188 void write_vstring(int f, const char *str, int len)
2190 uchar lenbuf[3], *lb = lenbuf;
2195 "attempting to send over-long vstring (%d > %d)\n",
2197 exit_cleanup(RERR_PROTOCOL);
/* Two-byte length: high byte carries the 0x80 marker. */
2199 *lb++ = len / 0x100 + 0x80;
2203 write_buf(f, (char*)lenbuf, lb - lenbuf + 1);
2205 write_buf(f, str, len);
2208 /* Send a file-list index using a byte-reduction method.
 * Indexes are delta-encoded against the previous positive (or negative)
 * value sent, so most transfers need only one byte per index. */
2209 void write_ndx(int f, int32 ndx)
2211 static int32 prev_positive = -1, prev_negative = 1;
2212 int32 diff, cnt = 0;
/* Old protocols (and batch reads) send the raw 4-byte index. */
2215 if (protocol_version < 30 || read_batch) {
2220 /* Send NDX_DONE as a single-byte 0 with no side effects. Send
2221 * negative nums as a positive after sending a leading 0xFF. */
2223 diff = ndx - prev_positive;
2224 prev_positive = ndx;
2225 } else if (ndx == NDX_DONE) {
2230 b[cnt++] = (char)0xFF;
2232 diff = ndx - prev_negative;
2233 prev_negative = ndx;
2236 /* A diff of 1 - 253 is sent as a one-byte diff; a diff of 254 - 32767
2237 * or 0 is sent as a 0xFE + a two-byte diff; otherwise we send 0xFE
2238 * & all 4 bytes of the (non-negative) num with the high-bit set. */
2239 if (diff < 0xFE && diff > 0)
2240 b[cnt++] = (char)diff;
2241 else if (diff < 0 || diff > 0x7FFF) {
2242 b[cnt++] = (char)0xFE;
2243 b[cnt++] = (char)((ndx >> 24) | 0x80);
2244 b[cnt++] = (char)ndx;
2245 b[cnt++] = (char)(ndx >> 8);
2246 b[cnt++] = (char)(ndx >> 16);
2248 b[cnt++] = (char)0xFE;
2249 b[cnt++] = (char)(diff >> 8);
2250 b[cnt++] = (char)diff;
2252 write_buf(f, b, cnt);
2255 /* Receive a file-list index using a byte-reduction method.
 * Decodes the delta encoding produced by write_ndx(), tracking separate
 * previous values for positive and negative index streams. */
2256 int32 read_ndx(int f)
2258 static int32 prev_positive = -1, prev_negative = 1;
2259 int32 *prev_ptr, num;
/* Old protocols send the raw 4-byte index. */
2262 if (protocol_version < 30)
/* A leading 0xFF selects the negative-index stream. */
2266 if (CVAL(b, 0) == 0xFF) {
2268 prev_ptr = &prev_negative;
2269 } else if (CVAL(b, 0) == 0)
2272 prev_ptr = &prev_positive;
/* 0xFE escape: either a 2-byte diff or a full 4-byte index follows. */
2273 if (CVAL(b, 0) == 0xFE) {
2275 if (CVAL(b, 0) & 0x80) {
2276 b[3] = CVAL(b, 0) & ~0x80;
2278 read_buf(f, b+1, 2);
2281 num = (UVAL(b,0)<<8) + UVAL(b,1) + *prev_ptr;
2283 num = UVAL(b, 0) + *prev_ptr;
2285 if (prev_ptr == &prev_negative)
2290 /* Read a line of up to bufsiz-1 characters into buf. Strips
2291 * the (required) trailing newline and all carriage returns.
2292 * Returns 1 for success; 0 for I/O error or truncation.
 * Must not be used on the multiplexed input fd (see assert below).
 * NOTE(review): the loop body past the EOF check is elided here. */
2293 int read_line_old(int fd, char *buf, size_t bufsiz, int eof_ok)
2295 assert(fd != iobuf.in_fd);
2296 bufsiz--; /* leave room for the null */
2297 while (bufsiz > 0) {
2298 if (safe_read(fd, buf, 1) == 0) {
/* printf-style formatting straight onto the connection; aborts on a
 * formatting error or when the result would not fit the local buffer. */
2316 void io_printf(int fd, const char *format, ...)
2319 char buf[BIGPATHBUFLEN];
2322 va_start(ap, format);
2323 len = vsnprintf(buf, sizeof buf, format, ap);
2327 exit_cleanup(RERR_PROTOCOL);
2329 if (len >= (int)sizeof buf) {
2330 rprintf(FERROR, "io_printf() was too long for the buffer.\n");
2331 exit_cleanup(RERR_PROTOCOL);
2334 write_sbuf(fd, buf);
2337 /* Setup for multiplexing a MSG_* stream with the data stream.
 * Flushes any unmultiplexed output first, allocates the msg buffer, and
 * reserves 4 bytes of header space in the out buffer. */
2338 void io_start_multiplex_out(int fd)
2340 io_flush(FULL_FLUSH);
2342 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
2343 rprintf(FINFO, "[%s] io_start_multiplex_out(%d)\n", who_am_i(), fd);
2346 alloc_xbuf(&iobuf.msg, ROUND_UP_1024(IO_BUFFER_SIZE));
2348 iobuf.out_empty_len = 4; /* See also OUT_MULTIPLEXED */
2349 io_start_buffering_out(fd);
2350 got_kill_signal = 0;
/* Remember where the next raw-data header will be written. */
2352 iobuf.raw_data_header_pos = iobuf.out.pos + iobuf.out.len;
2356 /* Setup for multiplexing a MSG_* stream with the data stream. */
2357 void io_start_multiplex_in(int fd)
2359 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
2360 rprintf(FINFO, "[%s] io_start_multiplex_in(%d)\n", who_am_i(), fd);
2362 iobuf.in_multiplexed = 1; /* See also IN_MULTIPLEXED */
2363 io_start_buffering_in(fd);
/* Turn off input multiplexing. Returns the fd that was multiplexed, or
 * -1 if multiplexing was not active. MPLX_SWITCHING discards any pending
 * raw-data bookkeeping; MPLX_TO_BUFFERED keeps the input buffering. */
2366 int io_end_multiplex_in(int mode)
2368 int ret = iobuf.in_multiplexed ? iobuf.in_fd : -1;
2370 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
2371 rprintf(FINFO, "[%s] io_end_multiplex_in(mode=%d)\n", who_am_i(), mode);
2373 iobuf.in_multiplexed = 0;
2374 if (mode == MPLX_SWITCHING)
2375 iobuf.raw_input_ends_before = 0;
2377 assert(iobuf.raw_input_ends_before == 0);
2378 if (mode != MPLX_TO_BUFFERED)
2379 io_end_buffering_in(mode);
/* Turn off output multiplexing. Returns the fd that was multiplexed, or
 * -1 if multiplexing was not active. Flushes everything, then clears the
 * reserved-header length and handles any deferred kill signal. */
2384 int io_end_multiplex_out(int mode)
2386 int ret = iobuf.out_empty_len ? iobuf.out_fd : -1;
2388 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
2389 rprintf(FINFO, "[%s] io_end_multiplex_out(mode=%d)\n", who_am_i(), mode);
2391 if (mode != MPLX_TO_BUFFERED)
2392 io_end_buffering_out(mode);
2394 io_flush(FULL_FLUSH);
2397 iobuf.out_empty_len = 0;
2398 if (got_kill_signal > 0) /* Just in case... */
2399 handle_kill_signal(False);
2400 got_kill_signal = -1;
/* Begin recording the transfer into a batch file: write the protocol
 * preamble, then arm the batch monitor on the appropriate direction. */
2405 void start_write_batch(int fd)
2407 /* Some communication has already taken place, but we don't
2408 * enable batch writing until here so that we can write a
2409 * canonical record of the communication even though the
2410 * actual communication so far depends on whether a daemon
 * is involved (sentence continues in elided source lines). */
2412 write_int(batch_fd, protocol_version);
2413 if (protocol_version >= 30)
2414 write_varint(batch_fd, compat_flags);
2415 write_int(batch_fd, checksum_seed);
/* Monitor outgoing data when sending, incoming when receiving. */
2418 write_batch_monitor_out = fd;
2420 write_batch_monitor_in = fd;
/* Stop copying transfer data into the batch file. */
2423 void stop_write_batch(void)
2425 write_batch_monitor_out = -1;
2426 write_batch_monitor_in = -1;