#!/usr/bin/perl

# Copyright (c) 2007 OmniTI Computer Consulting, Inc. All rights reserved.
# For information on licensing see:
#   https://labs.omniti.com/zetaback/trunk/LICENSE

use strict;
use Getopt::Long;
use MIME::Base64;
use POSIX qw/strftime/;
use Fcntl qw/:flock/;
use File::Copy;
use IO::File;
use Pod::Usage;

use vars qw/%conf %locks $version_string
            $PREFIX $CONF $BLOCKSIZE $DEBUG $HOST $BACKUP
            $RESTORE $RESTORE_HOST $RESTORE_ZFS $TIMESTAMP
            $LIST $SUMMARY $SUMMARY_EXT $SUMMARY_VIOLATORS
            $FORCE_FULL $FORCE_INC $EXPUNGE $NEUTERED $ZFS
            $SHOW_FILENAMES $ARCHIVE $VERSION $HELP/;
$version_string = '0.1';
$PREFIX = q^__PREFIX__^;
$CONF = qq^$PREFIX/etc/zetaback.conf^;
$BLOCKSIZE = 1024*64;

$conf{'default'}->{'time_format'} = "%Y-%m-%d %H:%M:%S";
$conf{'default'}->{'retention'} = 14 * 86400;
$conf{'default'}->{'compressionlevel'} = 1;

=pod

=head1 NAME

zetaback - perform backup, restore and retention policies for ZFS backups.

=head1 SYNOPSIS

  zetaback -v

  zetaback [-l | -s | -sx | -sv] [--files] [-c conf] [-d] [-h host] [-z zfs]

  zetaback -a [-c conf] [-d] [-h host] [-z zfs]

  zetaback -b [-ff] [-fi] [-x] [-c conf] [-d] [-n] [-h host] [-z zfs]

  zetaback -x [-b] [-c conf] [-d] [-n] [-h host] [-z zfs]

  zetaback -r [-c conf] [-d] [-n] [-h host] [-z zfs] [-t timestamp]
              [-rhost host] [-rzfs fs]

=cut

GetOptions(
  "h=s" => \$HOST,
  "z=s" => \$ZFS,
  "c=s" => \$CONF,
  "a" => \$ARCHIVE,
  "b" => \$BACKUP,
  "l" => \$LIST,
  "s" => \$SUMMARY,
  "sx" => \$SUMMARY_EXT,
  "sv" => \$SUMMARY_VIOLATORS,
  "r" => \$RESTORE,
  "t=i" => \$TIMESTAMP,
  "rhost=s" => \$RESTORE_HOST,
  "rzfs=s" => \$RESTORE_ZFS,
  "d" => \$DEBUG,
  "n" => \$NEUTERED,
  "x" => \$EXPUNGE,
  "v" => \$VERSION,
  "ff" => \$FORCE_FULL,
  "fi" => \$FORCE_INC,
  "files" => \$SHOW_FILENAMES,
);

# Actions: 'x' and 'b' may be combined; all other actions are mutually exclusive.
my $actions = 0;
$actions++ if($ARCHIVE);
$actions++ if($BACKUP || $EXPUNGE);
$actions++ if($RESTORE);
$actions++ if($LIST);
$actions++ if($SUMMARY);
$actions++ if($SUMMARY_EXT);
$actions++ if($SUMMARY_VIOLATORS);
$actions++ if($VERSION);
$actions++ if($BACKUP && $FORCE_FULL && $FORCE_INC);
if($actions != 1) {
  pod2usage({ -verbose => 0 });
  exit -1;
}

=pod

=head1 DESCRIPTION

The B<zetaback> program orchestrates the backup (either full or
incremental) of remote ZFS filesystems to a local store. It handles
frequency requirements for both full and incremental backups as well
as retention policies. In addition to backups, the B<zetaback> tool
allows for the restore of any backup to a specified host and zfs
filesystem.

=head1 OPTIONS

The action command line arguments are required and define the purpose of
the B<zetaback> invocation. All other arguments are optional and refine
the target of the specified action.

=head2 Generic Options

The following arguments have the same meaning over several actions:

=over

=item -c <conf>

Use the specified file as the configuration file. The default, if none is
specified, is /usr/local/etc/zetaback.conf. The prefix of this file may
also be specified as an argument to the configure script.

=item -d

Enable debugging output.

=item -n

Don't actually perform any remote commands or expunging. This is useful with
the -d argument to ascertain what would be done if the command were actually
executed.

=item -t <timestamp>

Used during the restore process to specify a backup image from the desired
point in time. If omitted, the command becomes interactive. This timestamp
is a UNIX timestamp and is shown in the output of the -s and -sx actions.

=item -rhost <host>

Specify the remote host that is the target for a restore operation. If
omitted, the command becomes interactive.

=item -rzfs <zfs>

Specify the remote ZFS filesystem that is the target for a restore
operation. If omitted, the command becomes interactive.

=item -h <host>

Filters the operation to the host specified. If <host> is of the form
/pattern/, it matches 'pattern' as a perl regular expression against available
hosts. If omitted, no limit is enforced and all hosts are used for the action.

=item -z <zfs>

Filters the operation to the zfs filesystem specified. If <zfs> is of the
form /pattern/, it matches 'pattern' as a perl regular expression against
available zfs filesystems. If omitted, no filter is enforced and all zfs
filesystems are used for the action. See the example following this list.

=back
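
For example, the following hypothetical invocations (the host and filesystem
names are examples only) illustrate filtering with the /pattern/ form:

  # Summarize backups for every host whose name begins with "web"
  zetaback -s -h /^web/

  # List backups of host1.example.com filesystems whose names begin with "data"
  zetaback -l -h host1.example.com -z /^data/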

=head2 Actions

=over

=item -v

Show the version.

=item -l

Show a brief listing of available backups.

=item -s

Like -l, -s will show a list of backups but provides additional information
about the backups including timestamp, type (full or incremental) and the
size on disk.

=item -sx

Shows an extended summary. In addition to the output provided by the -s
action, the -sx action will show detail for each available backup. For
full backups, the detail will include any more recent full backups, if
they exist. For incremental backups, the detail will include any
incremental backups that are more recent than the last full backup.

=item -sv

Display all backups in the current store that violate the configured
retention policy.

=item --files

Display the on-disk file corresponding to each backup named in the output.
This is useful with the -sv flag to name violating files. Oftentimes,
violators are filesystems that have been removed on the host machines, which
zetaback can therefore no longer back up. Be very careful if you choose to
automate the removal of such backups, as filesystems that would be backed up
by the next regular zetaback run will often show up as violators.

=item -a

Performs an archive. This option will look at all eligible backup points
(as restricted by -z and -h) and move those to the configured archive
directory. The recommended use is to first issue -sx --files, then
carefully review available backup points and prune those that are
unneeded. Then invoke with -a to move only the remaining "desired"
backup points into the archives (see the example following this list).
Archived backups do not appear in any listings or in the list of policy
violators generated by the -sv option. In effect, they are no longer
"visible" to zetaback.

=item -b

Performs a backup. This option will investigate all eligible hosts, query
the available filesystems from the remote agent and determine if any such
filesystems require a new full or incremental backup to be taken. This
option may be combined with the -x option (to clean up afterwards.)

=item -ff

Forces a full backup to be taken on each filesystem encountered. This is
used in combination with -b. It is recommended to use this option only when
targeting specific filesystems (via the -h and -z options.) Forcing a full
backup across all machines will cause staggered backups to coalesce and
could cause performance issues.

=item -fi

Forces an incremental backup to be taken on each filesystem encountered.
This is used in combination with -b. It is recommended to use this option
only when targeting specific filesystems (via the -h and -z options.) Forcing
an incremental backup across all machines will cause staggered backups
to coalesce and could cause performance issues.

=item -x

Perform an expunge. This option will determine which, if any, of the local
backups may be deleted given the retention policy specified in the
configuration.

=item -r

Perform a restore. This option will operate on the specified backup and
restore it to the ZFS filesystem specified with -rzfs on the host specified
with the -rhost option. The -h, -z and -t options may be used to filter
the source backup list. If the filtered list contains more than one
source backup image, the command will act interactively. If the -rhost
and -rzfs options are not specified, the command will act interactively.

=back
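
For example, the archive workflow described above for one hypothetical host
(the host name is an example only) is to review the backups and their
on-disk files, prune anything unwanted, and then archive the rest:

  zetaback -sx --files -h host1.example.com
  zetaback -a -h host1.example.com

A routine scheduled run that backs up everything and then expunges expired
backups can combine the -b and -x actions:

  zetaback -b -x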

=cut

if($VERSION) {
  print "zetaback: $version_string\n";
  exit 0;
}

=pod

=head1 CONFIGURATION

The zetaback configuration file consists of a default stanza, containing
settings that can be overridden on a per-host basis. A stanza begins
either with the string 'default', or a fully-qualified hostname, with
settings enclosed in braces ({}). Single-line comments begin with a hash
('#'), and whitespace is ignored, so feel free to indent for better
readability. Every host to be backed up must have a host stanza in the
configuration file.

=head2 Settings

The following settings are valid in both the default and host scopes:

=over

=item store

The base directory under which to keep backups. An interpolated variable
'%h' can be used, which expands to the hostname. There is no default for
this setting.

=item archive

The base directory under which archives are stored. The format is the same
as the store setting. This is the destination to which files are relocated
when issuing an archive action (-a).

=item agent

The location of the zetaback_agent binary on the host. There is no default
for this setting.

=item time_format

All timestamps within zetaback are in UNIX timestamp format. This setting
provides a string for formatting all timestamps on output. The sequences
available are identical to those in strftime(3). If not specified, the
default is '%Y-%m-%d %H:%M:%S'.

=item backup_interval

The frequency (in seconds) at which to perform incremental backups. An
incremental backup will be performed if the current time is more than
backup_interval since the last incremental backup. If there is no full backup
for a particular filesystem, then a full backup is performed. There is no
default for this setting.

=item full_interval

The frequency (in seconds) at which to perform full backups. A full backup will
be performed if the current time is more than full_interval since the last full
backup.

=item retention

The retention time (in seconds) for backups. Defaults to (14 * 86400), or two
weeks.

=item compressionlevel

Compress files using gzip at the specified compression level. Accepted
values are 1-9; 0 means no compression. Defaults to 1 (fastest/minimal
compression.)

=item ssh_config

Full path to an alternate ssh client config. This is useful for specifying a
less secure but faster cipher for some hosts, or using a different private
key. There is no default for this setting.

=back

=head1 CONFIGURATION EXAMPLES

=head2 Uniform hosts

This config results in backups stored in /var/spool/zfs_backups, with a
subdirectory for each host. Incremental backups will be performed
approximately once per day, assuming zetaback is run hourly. Full backups
will be done once per week. Time format and retention are default.

  default {
    store = /var/spool/zfs_backups/%h
    agent = /usr/local/bin/zetaback_agent
    backup_interval = 83000
    full_interval = 604800
  }

  host1 {}

  host2 {}

=head2 Non-uniform hosts

Here, host1's and host2's agents are found in different places, and host2's
backups should be stored in a different path.

  default {
    store = /var/spool/zfs_backups/%h
    agent = /usr/local/bin/zetaback_agent
    backup_interval = 83000
    full_interval = 604800
  }

  host1 {
    agent = /opt/local/bin/zetaback_agent
  }

  host2 {
    store = /var/spool/alt_backups/%h
    agent = /www/bin/zetaback_agent
  }
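
=head2 Tuning compression and ssh

Building on the default stanza from the previous examples, this hypothetical
host stores its backups uncompressed and points ssh at an alternate client
configuration (the host name and ssh_config path are examples only):

  host3 {
    compressionlevel = 0
    ssh_config = /usr/local/etc/zetaback_ssh_config
  }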

=cut

# Make the parser more formal:
# config => stanza*
# stanza => string { kvp* }
# kvp => string = string
my $str_re = qr/(?:"(?:\\\\|\\"|[^"])*"|\S+)/;
my $kvp_re = qr/($str_re)\s*=\s*($str_re)/;
my $stanza_re = qr/($str_re)\s*\{((?:\s*$kvp_re)*)\s*\}/;

sub parse_config() {
  local($/);
  $/ = undef;
  open(CONF, "<$CONF") || die "Unable to open config file: $CONF";
  my $file = <CONF>;
  # Rip comments
  $file =~ s/^\s*#.*$//mg;
  while($file =~ m/$stanza_re/gm) {
    my $scope = $1;
    my $filepart = $2;
    $scope =~ s/^"(.*)"$/$1/;
    $conf{$scope} ||= {};
    while($filepart =~ m/$kvp_re/gm) {
      my $key = $1;
      my $value = $2;
      $key =~ s/^"(.*)"$/$1/;
      $value =~ s/^"(.*)"$/$1/;
      $conf{$scope}->{lc($key)} = $value;
    }
  }
  close(CONF);
}
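
# A hypothetical illustration of the lookup below: with a config containing
#   default { agent = /usr/local/bin/zetaback_agent }
#   host1 { agent = /opt/local/bin/zetaback_agent }
# config_get('host1', 'agent') returns the host-specific value, while a host
# stanza with no agent setting of its own falls back to the default stanza.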
sub config_get($$) {
  return $conf{$_[0]}->{$_[1]} || $conf{'default'}->{$_[1]};
}

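# dir_encode()/dir_decode() translate a ZFS filesystem name (which may
# contain '/') into a base64 string that is safe to embed in a dump
# filename, and back again; e.g. a hypothetical filesystem 'data/www'
# round-trips through dir_decode(dir_encode('data/www')) unchanged.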
sub dir_encode($) {
  my $d = shift;
  my $e = encode_base64($d, '');
  $e =~ s/\//_/g;
  return $e;
}
sub dir_decode($) {
  my $e = shift;
  $e =~ s/_/\//g;
  return decode_base64($e);
}
sub pretty_size($) {
  my $bytes = shift;
  if($bytes > 1024*1024*1024) {
    return sprintf("%0.2f Gb", $bytes / (1024*1024*1024));
  }
  if($bytes > 1024*1024) {
    return sprintf("%0.2f Mb", $bytes / (1024*1024));
  }
  if($bytes > 1024) {
    return sprintf("%0.2f Kb", $bytes / (1024));
  }
  return "$bytes b";
}
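
# lock()/unlock() serialize zetaback's work per host (and, when a lock file
# name is given, per backup target) using flock()ed files kept in the host's
# store directory; the default lock file is master.lock.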
sub lock($;$$) {
  my ($host, $file, $nowait) = @_;
  print "Acquiring lock for $host:$file\n" if($DEBUG);
  $file ||= 'master.lock';
  my $store = config_get($host, 'store');
  $store =~ s/%h/$host/g;
  return 1 if(exists($locks{"$host:$file"}));
  open(LOCK, "+>>$store/$file") || return 0;
  unless(flock(LOCK, LOCK_EX | ($nowait ? LOCK_NB : 0))) {
    close(LOCK);
    return 0;
  }
  $locks{"$host:$file"} = \*LOCK;
  return 1;
}
sub unlock($;$$) {
  my ($host, $file, $remove) = @_;
  print "Releasing lock for $host:$file\n" if($DEBUG);
  $file ||= 'master.lock';
  my $store = config_get($host, 'store');
  $store =~ s/%h/$host/g;
  return 0 unless(exists($locks{"$host:$file"}));
  *UNLOCK = $locks{"$host:$file"};
  unlink("$store/$file") if($remove);
  flock(UNLOCK, LOCK_UN);
  close(UNLOCK);
  delete $locks{"$host:$file"};
  return 1;
}
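
# scan_for_backups() walks a store directory and returns a hashref keyed by
# filesystem name. A sketch of the structure it builds (the filesystem name
# and timestamps are hypothetical):
#   $info->{'data/www'}->{full}->{1200000000}->{file}            # full dump
#   $info->{'data/www'}->{incremental}->{1200086400}->{file}     # incremental dump
#   $info->{'data/www'}->{incremental}->{1200086400}->{depends}  # its base full
#   $info->{'data/www'}->{last_full}, {last_incremental}, {last_backup}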
sub scan_for_backups($) {
  my %info = ();
  my $dir = shift;
  $info{last_full} = $info{last_incremental} = $info{last_backup} = 0;
  opendir(D, $dir) || return \%info;
  foreach my $file (readdir(D)) {
    if($file =~ /^(\d+)\.([^\.]+)\.full$/) {
      my $whence = $1;
      my $fs = dir_decode($2);
      $info{$fs}->{full}->{$whence}->{'file'} = "$dir/$file";
      $info{$fs}->{last_full} = $whence if($whence > $info{$fs}->{last_full});
      $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
                                    $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
    }
    elsif($file =~ /^(\d+)\.([^\.]+)\.incremental\.(\d+)$/) {
      my $whence = $1;
      my $fs = dir_decode($2);
      $info{$fs}->{incremental}->{$whence}->{'depends'} = $3;
      $info{$fs}->{incremental}->{$whence}->{'file'} = "$dir/$file";
      $info{$fs}->{last_incremental} = $whence if($whence > $info{$fs}->{last_incremental});
      $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
                                    $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
    }
  }
  closedir(D);
  return \%info;
}

parse_config();

sub zetaback_log($$;@) {
  my ($host, $mess, @args) = @_;
  my $tf = config_get($host, 'time_format');
  my $file = config_get($host, 'logfile');
  my $fileh;
  if(defined($file)) {
    $fileh = IO::File->new(">>$file");
  }
  $fileh ||= IO::File->new(">&STDERR");
  printf $fileh "%s: $mess", strftime($tf, localtime(time)), @args;
  $fileh->close();
}

sub zfs_remove_snap($$$) {
  my ($host, $fs, $snap) = @_;
  my $agent = config_get($host, 'agent');
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  return unless($snap);
  print "Dropping $snap on $fs\n" if($DEBUG);
  `ssh $ssh_config $host $agent -z $fs -d $snap`;
}

# Lots of args.. internally called.
sub zfs_do_backup($$$$$$) {
  my ($host, $fs, $type, $point, $store, $dumpfile) = @_;
  my $agent = config_get($host, 'agent');
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);

  # Do it. yeah.
  my $cl = config_get($host, 'compressionlevel');
  if ($cl >= 1 && $cl <= 9) {
    open(LBACKUP, "|gzip -$cl >$store/.$dumpfile") ||
      die "zfs_full_backup: cannot create dump\n";
  } else {
    open(LBACKUP, ">$store/.$dumpfile") ||
      die "zfs_full_backup: cannot create dump\n";
  }
  eval {
    if(my $pid = fork()) {
      close(LBACKUP);
      waitpid($pid, 0);
      die "error: $?" if($?);
    }
    else {
      my @cmd = ('ssh', $ssh_config, $host, $agent, '-z', $fs, "-$type", $point);
      open(STDIN, "/dev/null") || exit(-1);
      open(STDOUT, ">&LBACKUP") || exit(-1);
      exec { $cmd[0] } @cmd;
      print STDERR "$cmd[0] failed: $!\n";
      exit($?);
    }
    die "dump failed (zero bytes)\n" if(-z "$store/.$dumpfile");
    rename("$store/.$dumpfile", "$store/$dumpfile") || die "cannot rename dump\n";
  };
  if($@) {
    unlink("$store/.$dumpfile");
    chomp(my $error = $@);
    $error =~ s/[\r\n]+/ /gsm;
    zetaback_log($host, "FAILED[$error] $host:$fs $type\n");
    die "zfs_full_backup: failed $@";
  }
  my @st = stat("$store/$dumpfile");
  my $size = pretty_size($st[7]);
  zetaback_log($host, "SUCCESS[$size] $host:$fs $type\n");
}

sub zfs_full_backup($$$) {
  my ($host, $fs, $store) = @_;

  # Translate into a proper dumpfile name
  my $point = time();
  my $efs = dir_encode($fs);
  my $dumpfile = "$point.$efs.full";

  zfs_do_backup($host, $fs, 'f', $point, $store, $dumpfile);
}

sub zfs_incremental_backup($$$$) {
  my ($host, $fs, $base, $store) = @_;
  my $agent = config_get($host, 'agent');

  # Translate into a proper dumpfile name
  my $point = time();
  my $efs = dir_encode($fs);
  my $dumpfile = "$point.$efs.incremental.$base";

  zfs_do_backup($host, $fs, 'i', $base, $store, $dumpfile);
}

sub perform_retention($$) {
  my ($host, $store) = @_;
  my $cutoff = time() - config_get($host, 'retention');
  my $backup_info = scan_for_backups($store);

  foreach my $disk (sort keys %{$backup_info}) {
    my $info = $backup_info->{$disk};
    next unless(ref($info) eq 'HASH');
    my %must_save;

    # Get a list of all the fulls and incrementals, sorted newest to oldest
    my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
    @backup_points = sort { $b <=> $a } @backup_points;

    # We _cannot_ throw away _all_ our backups,
    # so save the most recent incremental and full no matter what
    $must_save{$backup_points[0]} = 1;
    my @fulls = grep { exists($info->{full}->{$_}) } @backup_points;
    $must_save{$fulls[0]} = 1 if(@fulls);

    # Walk the list for backups within our retention period.
    foreach (@backup_points) {
      if($_ >= $cutoff) {
        $must_save{$_} = 1;
      }
      else {
        # they are in descending order; once we miss, all will miss
        last;
      }
    }

    # Look for dependencies
    foreach (@backup_points) {
      if(exists($info->{incremental}->{$_})) {
        print " => $_ depends on $info->{incremental}->{$_}->{depends}\n" if($DEBUG);
        $must_save{$info->{incremental}->{$_}->{depends}} = 1;
      }
    }
    my @removals = grep { !exists($must_save{$_}) } @backup_points;
    if($DEBUG) {
      my $tf = config_get($host, 'time_format');
      print " => I can remove:\n";
      foreach (@backup_points) {
        print " => ". strftime($tf, localtime($_));
        print " [". (exists($info->{full}->{$_}) ? "full":"incremental") ."]";
        print " XXX" if(!exists($must_save{$_}));
        print "\n";
      }
    }
    foreach (@removals) {
      my $efs = dir_encode($disk);
      my $filename;
      if(exists($info->{full}->{$_})) {
        $filename = "$store/$_.$efs.full";
      }
      elsif(exists($info->{incremental}->{$_})) {
        $filename = "$store/$_.$efs.incremental.$info->{incremental}->{$_}->{depends}";
      }
      else {
        print "ERROR: We tried to expunge $host $disk [$_], but couldn't find it.\n";
        next;
      }
      print " => expunging $filename\n" if($DEBUG);
      unless($NEUTERED) {
        unlink($filename) || print "ERROR: unlink $filename: $!\n";
      }
    }
  }
}

sub __default_sort($$) { return $_[0] cmp $_[1]; }

sub choose($$;$) {
  my($name, $obj, $sort) = @_;
  $sort ||= \&__default_sort;
  my @list;
  my $hash;
  if(ref $obj eq 'ARRAY') {
    @list = sort { $sort->($a,$b); } (@$obj);
    map { $hash->{$_} = $_; } @list;
  }
  elsif(ref $obj eq 'HASH') {
    @list = sort { $sort->($a,$b); } (keys %$obj);
    $hash = $obj;
  }
  else {
    die "choose passed bad object: " . ref($obj) . "\n";
  }
  return $list[0] if(scalar(@list) == 1);
  print "\n";
  my $i = 1;
  for (@list) {
    printf " %3d) $hash->{$_}\n", $i++;
  }
  my $selection = 0;
  while($selection !~ /^\d+$/ or
        $selection < 1 or
        $selection >= $i) {
    print "$name: ";
    chomp($selection = <>);
  }
  return $list[$selection - 1];
}

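# backup_chain() collects the dump records needed to restore to timestamp
# $ts: the backup taken at $ts plus, for an incremental, every backup it
# depends on, following the 'depends' links down to the underlying full.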
sub backup_chain($$) {
  my ($info, $ts) = @_;
  my @list;
  push @list, $info->{full}->{$ts} if(exists($info->{full}->{$ts}));
  if(exists($info->{incremental}->{$ts})) {
    push @list, $info->{incremental}->{$ts};
    push @list, backup_chain($info, $info->{incremental}->{$ts}->{depends});
  }
  return @list;
}

sub perform_restore() {
  my %source;

  foreach my $host (grep { $_ ne "default" } keys %conf) {
    # If -h was specified, we will skip this host if the arg isn't
    # an exact match or a pattern match
    if($HOST &&
       !(($HOST eq $host) ||
         ($HOST =~ /^\/(.*)\/$/ && $host =~ /$1/))) {
      next;
    }

    my $store = config_get($host, 'store');
    $store =~ s/%h/$host/g;
    mkdir $store if(! -d $store);

    my $backup_info = scan_for_backups($store);
    foreach my $disk (sort keys %{$backup_info}) {
      my $info = $backup_info->{$disk};
      next unless(ref($info) eq 'HASH');
      next
        if($ZFS &&                                      # if the pattern was specified it could
           !($disk eq $ZFS ||                           # be a specific match or a
             ($ZFS =~ /^\/(.+)\/$/ && $disk =~ /$1/))); # regex
      # We want to see this one
      my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
      my @source_points;
      foreach (@backup_points) {
        push @source_points, $_ if(!$TIMESTAMP || $TIMESTAMP == $_);
      }
      if(@source_points) {
        $source{$host}->{$disk} = \@source_points;
      }
    }
  }

  if(! keys %source) {
    print "No matching backups found\n";
    return;
  }

  # Here goes the possibly interactive dialog
  my $host = choose("Restore from host", [keys %source]);
  my $disk = choose("Restore from ZFS", [keys %{$source{$host}}]);

  # Times are special. We build a human readable form and use a numerical
  # sort function instead of the default lexical one.
  my %times;
  my $tf = config_get($host, 'time_format');
  map { $times{$_} = strftime($tf, localtime($_)); } @{$source{$host}->{$disk}};
  my $timestamp = choose("Restore as of timestamp", \%times,
                         sub { $_[0] <=> $_[1]; });

  my $store = config_get($host, 'store');
  $store =~ s/%h/$host/g;
  mkdir $store if(! -d $store);
  my $backup_info = scan_for_backups($store);
  my @backup_list = reverse backup_chain($backup_info->{$disk}, $timestamp);

  if(!$RESTORE_HOST) {
    print "Restore to host [$host]:";
    chomp(my $input = <>);
    $RESTORE_HOST = length($input) ? $input : $host;
  }
  if(!$RESTORE_ZFS) {
    print "Restore to zfs [$disk]:";
    chomp(my $input = <>);
    $RESTORE_ZFS = length($input) ? $input : $disk;
  }

  # show intentions
  print "Going to restore:\n";
  print "\tfrom: $host\n";
  print "\tfrom: $disk\n";
  print "\t at: $timestamp [" . strftime($tf, localtime($timestamp)) . "]\n";
  print "\t to: $RESTORE_HOST\n";
  print "\t to: $RESTORE_ZFS\n";
  print "\n";

  foreach(@backup_list) {
    $_->{success} = zfs_restore_part($RESTORE_HOST, $RESTORE_ZFS, $_->{file}, $_->{depends});
  }
}

sub zfs_restore_part($$$;$) {
  my ($host, $fs, $file, $dep) = @_;
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  my $command;
  if(exists($conf{$host})) {
    my $agent = config_get($host, 'agent');
    $command = "$agent -r -z $fs";
    $command .= " -b $dep" if($dep);
  }
  else {
    $command = "__ZFS__ recv $fs";
  }
  print " => piping $file to $command\n" if($DEBUG);
  if($NEUTERED) {
    print "gzip -dfc $file | ssh $ssh_config $host $command\n" if ($DEBUG);
  }
  else {
    open(DUMP, "gzip -dfc $file |");
    eval {
      open(RECEIVER, "| ssh $ssh_config $host $command");
      my $buffer;
      while(my $len = sysread(DUMP, $buffer, $BLOCKSIZE)) {
        if(syswrite(RECEIVER, $buffer, $len) != $len) {
          die "$!";
        }
      }
    };
    close(DUMP);
    close(RECEIVER);
  }
  return $?;
}

sub pretty_print_backup($$$) {
  my ($info, $host, $point) = @_;
  my $tf = config_get($host, 'time_format');
  print "\t" . strftime($tf, localtime($point)) . " [$point] ";
  if(exists($info->{full}->{$point})) {
    my @st = stat($info->{full}->{$point}->{file});
    print "FULL " . pretty_size($st[7]);
    print "\n\tfile: $info->{full}->{$point}->{file}" if($SHOW_FILENAMES);
  } else {
    my @st = stat($info->{incremental}->{$point}->{file});
    print "INCR from [$info->{incremental}->{$point}->{depends}] " . pretty_size($st[7]);
    print "\n\tfile: $info->{incremental}->{$point}->{file}" if($SHOW_FILENAMES);
  }
  print "\n";
}

sub show_backups($$$) {
  my ($host, $store, $diskpat) = @_;
  my $backup_info = scan_for_backups($store);
  my $tf = config_get($host, 'time_format');
  my @files;
  foreach my $disk (sort keys %{$backup_info}) {
    my $info = $backup_info->{$disk};
    next unless(ref($info) eq 'HASH');
    next
      if($diskpat &&                                        # if the pattern was specified it could
         !($disk eq $diskpat ||                             # be a specific match or a
           ($diskpat =~ /^\/(.+)\/$/ && $disk =~ /$1/)));   # regex

    my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
    @backup_points = sort { $a <=> $b } @backup_points;
    @backup_points = (pop @backup_points) unless ($ARCHIVE || $SUMMARY_EXT || $SUMMARY_VIOLATORS);

    # Quick short-circuit in the case of retention violation checks
    if($SUMMARY_VIOLATORS) {
      if(time() > $info->{last_full} + config_get($host, 'full_interval') ||
         time() > $info->{last_backup} + config_get($host, 'backup_interval')) {
        print "$host:$disk\n";
        pretty_print_backup($info, $host, $backup_points[0]);
      }
      next;
    }

    # We want to see this one
    print "$host:$disk\n";
    next unless($SUMMARY || $SUMMARY_EXT || $ARCHIVE);
    if($SUMMARY_EXT) {
      print "\tLast Full: ". ($info->{last_full} ? strftime($tf, localtime($info->{last_full})) : "Never") . "\n";
      if($info->{last_full} < $info->{last_incremental}) {
        print "\tLast Incr: ". strftime($tf, localtime($info->{last_incremental})). "\n";
      }
    }
    foreach (@backup_points) {
      pretty_print_backup($info, $host, $_);
      push @files, exists($info->{full}->{$_}) ? $info->{full}->{$_}->{file} : $info->{incremental}->{$_}->{file};
    }
    print "\n";
  }
  if($ARCHIVE && scalar(@files)) {
    my $archive = config_get($host, 'archive');
    $archive =~ s/%h/$host/g;
    if(! -d $archive) {
      mkdir($archive) || die "Cannot mkdir($archive)\n";
    }
    print "\nAre you sure you would like to archive ".scalar(@files)." file(s)? ";
    while(($_ = <>) !~ /(?:y|n|yes|no)$/i) {
      print "Are you sure you would like to archive ".scalar(@files)." file(s)? ";
    }
    if(/^y/i) {
      foreach my $file (@files) {
        (my $afile = $file) =~ s/^$store/$archive/;
        move($file, $afile) || print "Error archiving $file: $!\n";
      }
    }
  }
}

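# plan_and_run() asks the remote agent for the host's filesystems and, for
# each one selected by -z, decides between a full backup, an incremental
# backup, or no backup based on backup_interval/full_interval and on which
# __zb_full_* snapshots still exist on the remote side, then performs it.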
sub plan_and_run($$$) {
  my ($host, $store, $diskpat) = @_;
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  my %suppress;
  print "Planning '$host'\n" if($DEBUG);
  my $agent = config_get($host, 'agent');
  my $took_action = 1;
  while($took_action) {
    $took_action = 0;
    my @disklist;

    # We need a lock for the listing.
    return unless(lock($host, ".list"));
    open(SILENT, ">&", \*STDERR);
    close(STDERR);
    my $rv = open(ZFSLIST, "ssh $ssh_config $host $agent -l |");
    open(STDERR, ">&", \*SILENT);
    close(SILENT);
    next unless $rv;
    @disklist = grep { chomp } (<ZFSLIST>);
    close(ZFSLIST);

    foreach my $diskline (@disklist) {
      chomp($diskline);
      next unless($diskline =~ /^(\S+) \[([^\]]*)\]/);
      my $diskname = $1;
      my %snaps;
      map { $snaps{$_} = 1 } (split(/,/, $2));

      # We've just done this.
      next if($suppress{"$host:$diskname"});
      # If we are being selective (via -z) now is the time.
      next
        if($diskpat &&                                          # if the pattern was specified it could
           !($diskname eq $diskpat ||                           # be a specific match or a
             ($diskpat =~ /^\/(.+)\/$/ && $diskname =~ /$1/))); # regex

      print " => Scanning '$store' for old backups of '$diskname'.\n" if($DEBUG);

      # Make directory on demand
      my $backup_info = scan_for_backups($store);
      # That gave us info on all backups, we just want this disk
      $backup_info = $backup_info->{$diskname} || {};

      # Should we do a backup?
      my $backup_type = 'no';
      if(time() > $backup_info->{last_backup} + config_get($host, 'backup_interval')) {
        $backup_type = 'incremental';
      }
      if(time() > $backup_info->{last_full} + config_get($host, 'full_interval')) {
        $backup_type = 'full';
      }

      # If we want an incremental, but have no full, then we need to upgrade to full
      if($backup_type eq 'incremental') {
        my $have_full_locally = 0;
        # For each local full backup, see if the full backup still exists on the other end.
        foreach (keys %{$backup_info->{'full'}}) {
          $have_full_locally = 1 if(exists($snaps{'__zb_full_' . $_}));
        }
        $backup_type = 'full' unless($have_full_locally);
      }
      $backup_type = 'full' if($FORCE_FULL);
      $backup_type = 'incremental' if($FORCE_INC);

      print " => doing $backup_type backup\n" if($DEBUG);
      # We need to drop a __zb_base snap or a __zb_incr snap before we proceed
      unless($NEUTERED || $backup_type eq 'no') {
        # attempt to lock this action, if it fails, skip -- someone else is working it.
        next unless(lock($host, dir_encode($diskname), 1));
        unlock($host, '.list');

        if($backup_type eq 'full') {
          eval { zfs_full_backup($host, $diskname, $store); };
          if ($@) {
            chomp(my $err = $@);
            print " => failure $err\n";
          }
          else {
            # Unless there was an error backing up, remove all the other full snaps
            foreach (keys %snaps) {
              zfs_remove_snap($host, $diskname, $_) if(/^__zb_full_(\d+)/);
            }
          }
          $took_action = 1;
        }
        if($backup_type eq 'incremental') {
          zfs_remove_snap($host, $diskname, '__zb_incr') if($snaps{'__zb_incr'});
          # Find the newest full from which to do an incremental (NOTE: reverse numeric sort)
          my @fulls = sort { $b <=> $a } (keys %{$backup_info->{'full'}});
          zfs_incremental_backup($host, $diskname, $fulls[0], $store);
          $took_action = 1;
        }
        unlock($host, dir_encode($diskname), 1);
      }
      $suppress{"$host:$diskname"} = 1;
      last if($took_action);
    }
    unlock($host, '.list');
  }
}

if($RESTORE) {
  perform_restore();
}
else {
  foreach my $host (grep { $_ ne "default" } keys %conf) {
    # If -h was specified, we will skip this host if the arg isn't
    # an exact match or a pattern match
    if($HOST &&
       !(($HOST eq $host) ||
         ($HOST =~ /^\/(.*)\/$/ && $host =~ /$1/))) {
      next;
    }

    my $store = config_get($host, 'store');
    $store =~ s/%h/$host/g;
    mkdir $store if(! -d $store);

    if($LIST || $SUMMARY || $SUMMARY_EXT || $SUMMARY_VIOLATORS || $ARCHIVE) {
      show_backups($host, $store, $ZFS);
    }
    if($BACKUP) {
      plan_and_run($host, $store, $ZFS);
    }
    if($EXPUNGE) {
      perform_retention($host, $store);
    }
  }
}

exit 0;

=pod

=head1 FILES

=over

=item /usr/local/etc/zetaback.conf

The main zetaback configuration file. The location of the file can be
specified on the command line with the -c flag. The prefix of this
file may also be specified as an argument to the configure script.

=back

=head1 SEE ALSO

zetaback_agent(1)

=cut