root/zetaback.in

Revision 68a67fce2d353395b86cec58f2bf0c071c22e99b, 45.6 kB (checked in by Mark Harrison <mark@omniti.com>, 9 years ago)

We already have the value for $store, don't fetch again

  • Property mode set to 100755
Line 
1 #!/usr/bin/perl
2 # vim: sts=2 sw=2 ts=8 et
3
4 # Copyright (c) 2007 OmniTI Computer Consulting, Inc. All rights reserved.
5 # For information on licensing see:
6 #   https://labs.omniti.com/zetaback/trunk/LICENSE
7
8 use strict;
9 use Getopt::Long;
10 use MIME::Base64;
11 use POSIX qw/strftime/;
12 use Fcntl qw/:flock/;
13 use File::Path qw/mkpath/;
14 use File::Copy;
15 use IO::File;
16 use Pod::Usage;
17
18 use vars qw/%conf %locks $version_string
19             $PREFIX $CONF $BLOCKSIZE $DEBUG $HOST $BACKUP
20             $RESTORE $RESTORE_HOST $RESTORE_ZFS $TIMESTAMP
21             $LIST $SUMMARY $SUMMARY_EXT $SUMMARY_VIOLATORS
22             $FORCE_FULL $FORCE_INC $EXPUNGE $NEUTERED $ZFS
23             $SHOW_FILENAMES $ARCHIVE $VERSION $HELP/;
24 $version_string = q$URL$;
25 $version_string =~ s#/branches/#/b#;
26 $version_string =~ s#^.*/([^/]+)/[^/]+$#$1#;
27 $PREFIX = q^__PREFIX__^;
28 $CONF = qq^$PREFIX/etc/zetaback.conf^;
29 $BLOCKSIZE = 1024*64;
30
31 $conf{'default'}->{'time_format'} = "%Y-%m-%d %H:%M:%S";
32 $conf{'default'}->{'retention'} = 14 * 86400;
33 $conf{'default'}->{'compressionlevel'} = 1;
34 $conf{'default'}->{'dataset_backup'} = 0;
35
36 =pod
37
38 =head1 NAME
39
40 zetaback - perform backup, restore and retention policies for ZFS backups.
41
42 =head1 SYNOPSIS
43
44   zetaback -v
45
46   zetaback [-l | -s | -sx | -sv] [--files] [-c conf] [-d] [-h host] [-z zfs]
47
48   zetaback -a [-c conf] [-d] [-h host] [-z zfs]
49
50   zetaback -b [-ff] [-fi] [-x] [-c conf] [-d] [-n] [-h host] [-z zfs]
51
52   zetaback -x [-b] [-c conf] [-d] [-n] [-h host] [-z zfs]
53
54   zetaback -r [-c conf] [-d] [-n] [-h host] [-z zfs] [-t timestamp]
55               [-rhost host] [-rzfs fs]
56
57 =cut
58
59 GetOptions(
60   "h=s"     => \$HOST,
61   "z=s"     => \$ZFS,
62   "c=s"     => \$CONF,
63   "a"       => \$ARCHIVE,
64   "b"       => \$BACKUP,
65   "l"       => \$LIST,
66   "s"       => \$SUMMARY,
67   "sx"      => \$SUMMARY_EXT,
68   "sv"      => \$SUMMARY_VIOLATORS,
69   "r"       => \$RESTORE,
70   "t=i"     => \$TIMESTAMP,
71   "rhost=s" => \$RESTORE_HOST,
72   "rzfs=s"  => \$RESTORE_ZFS,
73   "d"       => \$DEBUG,
74   "n"       => \$NEUTERED,
75   "x"       => \$EXPUNGE,
76   "v"       => \$VERSION,
77   "ff"      => \$FORCE_FULL,
78   "fi"      => \$FORCE_INC,
79   "files"   => \$SHOW_FILENAMES,
80 );
81
82 # actions allowed together 'x' and 'b' all others are exclusive:
83 my $actions = 0;
84 $actions++ if($ARCHIVE);
85 $actions++ if($BACKUP || $EXPUNGE);
86 $actions++ if($RESTORE);
87 $actions++ if($LIST);
88 $actions++ if($SUMMARY);
89 $actions++ if($SUMMARY_EXT);
90 $actions++ if($SUMMARY_VIOLATORS);
91 $actions++ if($VERSION);
92 $actions++ if($BACKUP && $FORCE_FULL && $FORCE_INC);
93 if($actions != 1) {
94   pod2usage({ -verbose => 0 });
95   exit -1;
96 }
97
98 =pod
99
100 =head1 DESCRIPTION
101
102 The B<zetaback> program orchestrates the backup (either full or
103 incremental) of remote ZFS filesystems to a local store.  It handles
104 frequency requirements for both full and incemental backups as well
105 as retention policies.  In addition to backups, the B<zetaback> tool
106 allows for the restore of any backup to a specified host and zfs
107 filesystem.
108
109 =head1 OPTIONS
110
111 The non-optional action command line arguments define the invocation purpose
112 of B<zetaback>.  All other arguments are optional and refine the target
113 of the action specified.
114
115 =head2 Generic Options
116
117 The following arguments have the same meaning over several actions:
118
119 =over
120
121 =item -c <conf>
122
123 Use the specified file as the configuration file.  The default file, if
124 none is specified is /usr/local/etc/zetaback.conf.  The prefix of this
125 file may also be specified as an argument to the configure script.
126
127 =item -d
128
129 Enable debugging output.
130
131 =item -n
132
133 Don't actually perform any remote commands or expunging.  This is useful with
134 the -d argument to ascertain what would be done if the command was actually
135 executed.
136
137 =item -t <timestamp>
138
139 Used during the restore process to specify a backup image from the desired
140 point in time.  If omitted, the command becomes interactive.  This timestamp
141 is a UNIX timestamp and is shown in the output of the -s and -sx actions.
142
143 =item -rhost <host>
144
145 Specify the remote host that is the target for a restore operation.  If
146 omitted the command becomes interactive.
147
148 =item -rzfs <zfs>
149
150 Specify the remote ZFS filesystem that is the target for a restore
151 operation.  If omitted the command becomes interactive.
152
153 =item -h <host>
154
155 Filters the operation to the host specified.  If <host> is of the form
156 /pattern/, it matches 'pattern' as a perl regular expression against available
157 hosts.  If omitted, no limit is enforced and all hosts are used for the action.
158
159 =item -z <zfs>
160
161 Filters the operation to the zfs filesystem specified.  If <zfs> is of the
162 form /pattern/, it matches 'pattern' as a perl regular expression against
163 available zfs filesystems.  If omitted, no filter is enforced and all zfs
164 filesystems are used for the action.
165
166 =back
167
168 =head2 Actions
169
170 =over
171
172 =item -v
173
174 Show the version.
175
176 =item -l
177
178 Show a brief listing of available backups.
179
180 =item -s
181
182 Like -l, -s will show a list of backups but provides additional information
183 about the backups including timestamp, type (full or incremental) and the
184 size on disk.
185
186 =item -sx
187
188 Shows an extended summary.  In addition to the output provided by the -s
189 action, the -sx action will show detail for each availble backup.  For
190 full backups, the detail will include any more recent full backups, if
191 they exist.  For incremental backups, the detail will include any
192 incremental backups that are more recent than the last full backup.
193
194 =item -sv
195
196 Display all backups in the current store that violate the configured
197 backup policy. This is where the most recent full backup is older than
198 full_interval seconds ago, or the most recent incremental backup is older
199 than backup_interval seconds ago.
200
201 =item --files
202
203 Display the on-disk file corresponding to each backup named in the output.
204 This is useful with the -sv flag to name violating files.  Often times,
205 violators are filesystems that have been removed on the host machines and
206 zetaback can no longer back them up.  Be very careful if you choose to
207 automate the removal of such backups as filesystems that would be backed up
208 by the next regular zetaback run will often show up as violators.
209
210 =item -a
211
212 Performs an archive.  This option will look at all eligible backup points
213 (as restricted by -z and -h) and move those to the configured archive
214 directory.  The recommended use is to first issue -sx --files then
215 carefully review available backup points and prune those that are
216 unneeded.  Then invoke with -a to move only the remaining "desired"
217 backup points into the archives.  Archived backups do not appear in any
218 listings or in the list of policy violators generated by the -sv option.
219 In effect, they are no longer "visible" to zetaback.
220
221 =item -b
222
223 Performs a backup.  This option will investigate all eligible hosts, query
224 the available filesystems from the remote agent and determine if any such
225 filesystems require a new full or incremental backup to be taken.  This
226 option may be combined with the -x option (to clean up afterwards.)
227
228 =item -ff
229
230 Forces a full backup to be taken on each filesystem encountered.  This is
231 used in combination with -b.  It is recommended to use this option only when
232 targeting specific filesystems (via the -h and -z options.)  Forcing a full
233 backup across all machines will cause staggered backups to coalesce and
234 could cause performance issues.
235
236 =item -fi
237
238 Forces an incremental backup to be taken on each filesystem encountered. 
239 This is used in combination with -b.  It is recommended to use this option
240 only when targeting specific filesystems (via the -h and -z options.)  Forcing
241 an incremental backup across all machines will cause staggered backups
242 to coalesce and could cause performance issues.
243
244 =item -x
245
246 Perform an expunge.  This option will determine which, if any, of the local
247 backups may be deleted given the retention policy specified in the
248 configuration.
249
250 =item -r
251
252 Perform a restore.  This option will operate on the specified backup and
253 restore it to the ZFS filesystem specified with -rzfs on the host specified
254 with the -rhost option.  The -h, -z and -t options may be used to filter
255 the source backup list.  If the filtered list contains more than one
256 source backup image, the command will act interactively.  If the -rhost
257 and -rzfs command are not specified, the command will act interactively.
258
259 =back
260
261 =cut
262
263 if($VERSION) {
264   print "zetaback: $version_string\n";
265   exit 0;
266 }
267
268 =pod
269
270 =head1 CONFIGURATION
271
272 The zetaback configuration file consists of a default stanza, containing
273 settings that can be overridden on a per-host basis.  A stanza begins
274 either with the string 'default', or a fully-qualified hostname, with
275 settings enclosed in braces ({}).  Single-line comments begin with a hash
276 ('#'), and whitespace is ignored, so feel free to indent for better
277 readability.  Every host to be backed up must have a host stanza in the
278 configuration file.
279
280 =head2 Settings
281
282 The following settings are valid in both the default and host scopes:
283
284 =over
285
286 =item store
287
288 The base directory under which to keep backups.  An interpolated variable
289 '%h' can be used, which expands to the hostname.  There is no default for
290 this setting.
291
292 =item archive
293
294 The base directory under which archives are stored.  The format is the same
295 as the store setting.  This is the destination to which files are relocated
296 when issuing an archive action (-a).
297
298 =item agent
299
300 The location of the zetaback_agent binary on the host.  There is no default
301 for this setting.
302
303 =item time_format
304
305 All timestamps within zetaback are in UNIX timestamp format.  This setting
306 provides a string for formatting all timestamps on output.  The sequences
307 available are identical to those in strftime(3).  If not specified, the
308 default is '%Y-%m-%d %H:%M:%S'.
309
310 =item backup_interval
311
312 The frequency (in seconds) at which to perform incremental backups.  An
313 incremental backup will be performed if the current time is more than
314 backup_interval since the last incremental backup.  If there is no full backup
315 for a particular filesystem, then a full backup is performed.  There is no
316 default for this setting.
317
318 =item full_interval
319
320 The frequency (in seconds) at which to perform full backups.  A full backup will
321 be performed if the current time is more than full_interval since the last full
322 backup.
323
324 =item retention
325
326 The retention time (in seconds) for backups.  This can be a simple number, in
327 which case all backups older than this will be expunged.
328
329 The retention specification can also be more complex, and consist of pairs of
330 values separated by a comma. The first value is a time period in seconds, and
331 the second value is how many backups should be retained within that period.
332 For example:
333
334 retention = 3600,4;86400,11
335
336 This will keep up to 4 backups for the first hour, and an additional 11
337 backups over 24 hours. The times do not stack. In other words, the 11 backups
338 would be kept during the period from 1 hour old to 24 hours old, or one every
339 2 hours.
340
341 Any backups older than the largest time given are deleted. In the above
342 example, all backups older than 24 hours are deleted.
343
344 If a second number is not specified, then all backups are kept within that
345 period.
346
347 Note: Full backups are never deleted if they are depended upon by an
348 incremental. In addition, the most recent backup is never deleted, regardless
349 of how old it is.
350
351 This value defaults to (14 * 86400), or two weeks.
352
353 =item compressionlevel
354
355 Compress files using gzip at the specified compression level. 0 means no
356 compression. Accepted values are 1-9. Defaults to 1 (fastest/minimal
357 compression.)
358
359 =item ssh_config
360
361 Full path to an alternate ssh client config.  This is useful for specifying a
362 less secure but faster cipher for some hosts, or using a different private
363 key.  There is no default for this setting.
364
365 =item dataset_backup
366
367 By default zetaback backs zfs filesystems up to files. This option lets you
368 specify that the backup go be stored as a zfs dataset on the backup host.
369
370 =item offline
371
372 Setting this option to 1 for a host will mark it as being 'offline'. Hosts
373 that are marked offline will not be backed up, will not have any old backups
374 expunged and will not be included in the list of policy violators. However,
375 the host will still be shown when listing backups and archiving.
376
377 =back
378
379 =head1 CONFIGURATION EXAMPLES
380
381 =head2 Uniform hosts
382
383 This config results in backups stored in /var/spool/zfs_backups, with a
384 subdirectory for each host.  Incremental backups will be performed
385 approximately once per day, assuming zetaback is run hourly.  Full backups
386 will be done once per week.  Time format and retention are default.
387
388   default {
389     store = /var/spool/zfs_backups/%h
390     agent = /usr/local/bin/zetaback_agent
391     backup_interval = 83000
392     full_interval = 604800
393   }
394
395   host1 {}
396
397   host2 {}
398
399 =head2 Non-uniform hosts
400
401 Here, host1's and host2's agents are found in different places, and host2's
402 backups should be stored in a different path.
403
404   default {
405     store = /var/spool/zfs_backups/%h
406     agent = /usr/local/bin/zetaback_agent
407     backup_interval = 83000
408     full_interval = 604800
409   }
410
411   host1 {
412     agent = /opt/local/bin/zetaback_agent
413   }
414
415   host2 {
416     store = /var/spool/alt_backups/%h
417     agent = /www/bin/zetaback_agent
418   }
419
420 =cut
421
422 # Make the parser more formal:
423 # config => stanza*
424 # stanza => string { kvp* }
425 # kvp    => string = string
426 my $str_re = qr/(?:"(?:\\\\|\\"|[^"])*"|\S+)/;
427 my $kvp_re = qr/($str_re)\s*=\s*($str_re)/;
428 my $stanza_re = qr/($str_re)\s*\{((?:\s*$kvp_re)*)\s*\}/;
429
430 sub parse_config() {
431   local($/);
432   $/ = undef;
433   open(CONF, "<$CONF") || die "Unable to open config file: $CONF";
434   my $file = <CONF>;
435   # Rip comments
436   $file =~ s/^\s*#.*$//mg;
437   while($file =~ m/$stanza_re/gm) {
438     my $scope = $1;
439     my $filepart = $2;
440     $scope =~ s/^"(.*)"$/$1/;
441     $conf{$scope} ||= {};
442     while($filepart =~ m/$kvp_re/gm) {
443       my $key = $1;
444       my $value = $2;
445       $key =~ s/^"(.*)"$/$1/;
446       $value =~ s/^"(.*)"$/$1/;
447       $conf{$scope}->{lc($key)} = $value;
448     }
449   }
450   close(CONF);
451 }
452 sub config_get($$;$) {
453   # Params: host, key, class
454   # Order of precedence: class, host, default
455   if ($_[2]) {
456     return $conf{$_[2]}->{$_[1]} || $conf{$_[0]}->{$_[1]} ||
457         $conf{'default'}->{$_[1]};
458   } else {
459     return $conf{$_[0]}->{$_[1]} || $conf{'default'}->{$_[1]};
460   }
461 }
462
463 sub get_store($;$) {
464   my ($host, $class) = @_;
465   my $store = config_get($host, 'store', $class);
466   $store =~ s/%h/$host/g;;
467   mkpath($store) if(! -d $store);
468   return $store;
469 }
470
471 sub get_classes() {
472   my @classes = (""); # The default/blank class is always present
473   foreach my $key (keys %conf) {
474     if ($conf{$key}->{'type'} eq 'class') {
475       push @classes, $key;
476     }
477   }
478   return @classes;
479 }
480
481 sub fs_encode($) {
482   my $d = shift;
483   my @parts = split('@', $d);
484   my $e = encode_base64($parts[0], '');
485   $e =~ s/\//_/g;
486   $e =~ s/=/-/g;
487   $e =~ s/\+/\./g;
488   if (exists $parts[1]) {
489     $e .= "\@$parts[1]";
490   }
491   return $e;
492 }
493 sub fs_decode($) {
494   my $e = shift;
495   $e =~ s/_/\//g;
496   $e =~ s/-/=/g;
497   $e =~ s/\./\+/g;
498   return decode_base64($e);
499 }
500 sub dir_encode($) {
501   my $d = shift;
502   my $e = encode_base64($d, '');
503   $e =~ s/\//_/;
504   return $e;
505 }
506 sub dir_decode($) {
507   my $e = shift;
508   $e =~ s/_/\//;
509   return decode_base64($e);
510 }
511 sub pretty_size($) {
512   my $bytes = shift;
513   if($bytes > 1024*1024*1024) {
514     return sprintf("%0.2f Gb", $bytes / (1024*1024*1024));
515   }
516   if($bytes > 1024*1024) {
517     return sprintf("%0.2f Mb", $bytes / (1024*1024));
518   }
519   if($bytes > 1024) {
520     return sprintf("%0.2f Kb", $bytes / (1024));
521   }
522   return "$bytes b";
523 }
524 sub lock($;$$) {
525   my ($host, $file, $nowait) = @_;
526   print "Acquiring lock for $host:$file\n" if($DEBUG);
527   $file ||= 'master.lock';
528   my $store = get_store($host); # Don't take classes into account - not needed
529   return 1 if(exists($locks{"$host:$file"}));
530   open(LOCK, "+>>$store/$file") || return 0;
531   unless(flock(LOCK, LOCK_EX | ($nowait ? LOCK_NB : 0))) {
532     close(LOCK);
533     return 0;
534   }
535   $locks{"$host:$file"} = \*LOCK;
536   return 1;
537 }
538 sub unlock($;$$) {
539   my ($host, $file, $remove) = @_;
540   print "Releasing lock for $host:$file\n" if($DEBUG);
541   $file ||= 'master.lock';
542   my $store = get_store($host); # Don't take classes into account - not needed
543   return 0 unless(exists($locks{"$host:$file"}));
544   *UNLOCK = $locks{$file};
545   unlink("$store/$file") if($remove);
546   flock(UNLOCK, LOCK_UN);
547   close(UNLOCK);
548   return 1;
549 }
550 sub scan_for_backups($) {
551   my %info = ();
552   my $dir = shift;
553   $info{last_full} = $info{last_incremental} = $info{last_backup} = 0;
554   # Look for standard file based backups first
555   opendir(D, $dir) || return \%info;
556   foreach my $file (readdir(D)) {
557     if($file =~ /^(\d+)\.([^\.]+)\.full$/) {
558       my $whence = $1;
559       my $fs = dir_decode($2);
560       $info{$fs}->{full}->{$whence}->{'file'} = "$dir/$file";
561       $info{$fs}->{last_full} = $whence if($whence > $info{$fs}->{last_full});
562       $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
563                                      $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
564     }
565     elsif($file =~ /^(\d+).([^\.]+)\.incremental.(\d+)$/) {
566       my $whence = $1;
567       my $fs = dir_decode($2);
568       $info{$fs}->{incremental}->{$whence}->{'depends'} = $3;
569       $info{$fs}->{incremental}->{$whence}->{'file'} = "$dir/$file";
570       $info{$fs}->{last_incremental} = $whence if($whence > $info{$fs}->{last_incremental});
571       $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
572                                      $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
573     }
574   }
575   closedir(D);
576   # Now look for zfs based backups
577   my $storefs;
578   eval {
579     $storefs = get_fs_from_mountpoint($dir);
580   };
581   return \%info if ($@);
582   my $rv = open(ZFSLIST, "__ZFS__ list -H -r -t snapshot $storefs |");
583   return \%info unless $rv;
584   while (<ZFSLIST>) {
585       my @F = split(' ');
586       my ($rawfs, $snap) = split('@', $F[0]);
587       my ($whence) = ($snap =~ /(\d+)/);
588       next unless $whence;
589       my @fsparts = split('/', $rawfs);
590       my $fs = fs_decode($fsparts[-1]);
591       # Treat a dataset backup as a full backup from the point of view of the
592       # backup lists
593       $info{$fs}->{full}->{$whence}->{'snapshot'} = $snap;
594       $info{$fs}->{full}->{$whence}->{'dataset'} = "$rawfs\@$snap";
595       # Note - this field isn't set for file backups - we probably should do
596       # this
597       $info{$fs}->{full}->{$whence}->{'pretty_size'} = "$F[1]";
598       $info{$fs}->{last_full} = $whence if ($whence >
599           $info{$fs}->{last_full});
600       $info{$fs}->{last_backup} = $whence if ($whence >
601           $info{$fs}->{last_backup});
602   }
603   close(ZFSLIST);
604
605   return \%info;
606 }
607
608 parse_config();
609
610 sub zetaback_log($$;@) {
611   my ($host, $mess, @args) = @_;
612   my $tf = config_get($host, 'time_format');
613   my $file = config_get($host, 'logfile');
614   my $fileh;
615   if(defined($file)) {
616     $fileh = IO::File->new(">>$file");
617   }
618   $fileh ||= IO::File->new(">&STDERR");
619   printf $fileh "%s: $mess", strftime($tf, localtime(time)), @args;
620   $fileh->close();
621 }
622
623 sub zfs_remove_snap($$$) {
624   my ($host, $fs, $snap) = @_;
625   my $agent = config_get($host, 'agent');
626   my $ssh_config = config_get($host, 'ssh_config');
627   $ssh_config = "-F $ssh_config" if($ssh_config);
628   print "Using custom ssh config file: $ssh_config\n" if($DEBUG);
629   return unless($snap);
630   print "Dropping $snap on $fs\n" if($DEBUG);
631   `ssh $ssh_config $host $agent -z $fs -d $snap`;
632 }
633
634 # Lots of args.. internally called.
635 sub zfs_do_backup($$$$$$;$) {
636   my ($host, $fs, $type, $point, $store, $dumpname, $base) = @_;
637   my ($storefs, $encodedname);
638   my $agent = config_get($host, 'agent');
639   my $ssh_config = config_get($host, 'ssh_config');
640   $ssh_config = "-F $ssh_config" if($ssh_config);
641   print "Using custom ssh config file: $ssh_config\n" if($DEBUG);
642
643   # compression is meaningless for dataset backups
644   if ($type ne "s") {
645     my $cl = config_get($host, 'compressionlevel');
646     if ($cl >= 1 && $cl <= 9) {
647         open(LBACKUP, "|gzip -$cl >$store/.$dumpname") ||
648         die "zfs_full_backup: cannot create dump\n";
649     } else {
650         open(LBACKUP, ">$store/.$dumpname") ||
651         die "zfs_full_backup: cannot create dump\n";
652     }
653   } else {
654     # Dataset backup - pipe received filesystem to zfs recv
655     eval {
656       $storefs = get_fs_from_mountpoint($store);
657     };
658     if ($@) {
659       # The zfs filesystem doesn't exist, so we have to work out what it
660       # would be
661       my $basestore = $store;
662       $basestore =~ s/\/?%h//g;
663       $storefs = get_fs_from_mountpoint($basestore);
664       $storefs="$storefs/$host";
665     }
666     $encodedname = fs_encode($dumpname);
667     print STDERR "Receiving to zfs filesystem $storefs/$encodedname\n"
668       if($DEBUG);
669     zfs_create_intermediate_filesystems("$storefs/$encodedname");
670     open(LBACKUP, "|__ZFS__ recv $storefs/$encodedname");
671   }
672   # Do it. yeah.
673   eval {
674     if(my $pid = fork()) {
675       close(LBACKUP);
676       waitpid($pid, 0);
677       die "error: $?" if($?);
678     }
679     else {
680       my @cmd = ('ssh', split(/ /, $ssh_config), $host, $agent, '-z', $fs);
681       if ($type eq "i" || ($type eq "s" && $base)) {
682         push @cmd, ("-i", $base);
683       }
684       if ($type eq "f" || $type eq "s") {
685         push @cmd, ("-$type", $point);
686       }
687       open STDIN, "/dev/null" || exit(-1);
688       open STDOUT, ">&LBACKUP" || exit(-1);
689       print STDERR "   => @cmd\n" if($DEBUG);
690       exec { $cmd[0] } @cmd;
691       print STDERR "$cmd[0] failed: $?\n";
692       exit($?);
693     }
694     if ($type ne "s") {
695       die "dump failed (zero bytes)\n" if(-z "$store/.$dumpname");
696       rename("$store/.$dumpname", "$store/$dumpname") || die "cannot rename dump\n";
697     } else {
698       # Check everything is ok
699       `__ZFS__ list $storefs/$encodedname`;
700       die "dump failed (received snapshot $storefs/$encodedname does not exist)\n"
701         if $?;
702     }
703   };
704   if($@) {
705     if ($type ne "s") {
706         unlink("$store/.$dumpname");
707     }
708     chomp(my $error = $@);
709     $error =~ s/[\r\n]+/ /gsm;
710     zetaback_log($host, "FAILED[$error] $host:$fs $type\n");
711     die "zfs_full_backup: failed $@";
712   }
713   my $size;
714   if ($type ne "s") {
715     my @st = stat("$store/$dumpname");
716     $size = pretty_size($st[7]);
717   } else {
718     $size = `__ZFS__ get -Ho value used $storefs/$encodedname`;
719     chomp $size;
720   }
721   zetaback_log($host, "SUCCESS[$size] $host:$fs $type\n");
722 }
723
724 sub zfs_create_intermediate_filesystems($) {
725   my ($fs) = @_;
726   my $idx=0;
727   while (($idx = index($fs, '/', $idx+1)) != -1) {
728       my $fspart = substr($fs, 0, $idx);
729       `__ZFS__ list $fspart 2>&1`;
730       if ($?) {
731         print STDERR "Creating intermediate zfs filesystem: $fspart\n"
732           if $DEBUG;
733         `__ZFS__ create $fspart`;
734       }
735   }
736 }
737
738 sub zfs_full_backup($$$) {
739   my ($host, $fs, $store) = @_;
740
741   # Translate into a proper dumpname
742   my $point = time();
743   my $efs = dir_encode($fs);
744   my $dumpname = "$point.$efs.full";
745
746   zfs_do_backup($host, $fs, 'f', $point, $store, $dumpname);
747 }
748
749 sub zfs_incremental_backup($$$$) {
750   my ($host, $fs, $base, $store) = @_;
751   my $agent = config_get($host, 'agent');
752
753   # Translate into a proper dumpname
754   my $point = time();
755   my $efs = dir_encode($fs);
756   my $dumpname = "$point.$efs.incremental.$base";
757
758   zfs_do_backup($host, $fs, 'i', $point, $store, $dumpname, $base);
759 }
760
761 sub zfs_dataset_backup($$$$) {
762   my ($host, $fs, $base, $store) = @_;
763   my $agent = config_get($host, 'agent');
764
765   my $point = time();
766   my $dumpname = "$fs\@$point";
767
768   zfs_do_backup($host, $fs, 's', $point, $store, $dumpname, $base);
769 }
770
771 sub perform_retention($) {
772   my ($host) = @_;
773   my $now = time();
774
775   if ($DEBUG) {
776     print "Performing retention for $host\n";
777   }
778
779   foreach my $class (get_classes()) {
780     if ($DEBUG) {
781       if ($class) {
782         print "=> Class: $class\n" if $class;
783       } else {
784         print "=> Class: (none)\n";
785       }
786     }
787     my $retention = config_get($host, 'retention', $class);
788     my $store = get_store($host, $class);
789     my $backup_info = scan_for_backups($store);
790     foreach my $disk (sort keys %{$backup_info}) {
791       my $info = $backup_info->{$disk};
792       next unless(ref($info) eq 'HASH');
793       my %must_save;
794
795       if ($DEBUG) {
796         print "   $disk\n";
797       }
798
799       # Get a list of all the full and incrementals, sorts newest to oldest
800       my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
801       @backup_points = sort { $b <=> $a } @backup_points;
802
803       # We _cannot_ throw away _all_ our backups,
804       # so save the most recent incremental and full no matter what
805       push(@{$must_save{$backup_points[0]}}, "most recent backup");
806       my @fulls = grep { exists($info->{full}->{$_}) } @backup_points;
807       push(@{$must_save{$fulls[0]}}, "most recent full");
808
809       # Process retention policy
810       my @parts = split(/;/, $retention);
811       my %retention_map;
812       foreach (@parts) {
813         my ($period, $amount) = split(/,/);
814         if (!defined($amount)) {
815           $amount = -1;
816         }
817         $retention_map{$period} = $amount;
818       }
819       my @periods = sort { $a <=> $b } keys(%retention_map);
820       my %backup_bins;
821       foreach(@periods) {
822         $backup_bins{$_} = ();
823       }
824       my $cutoff = $now - $periods[0];
825       # Sort backups into time period sections
826       foreach (@backup_points) {
827         # @backup_points is in descending order (newest first)
828         while ($_ <= $cutoff) {
829           # Move to the next largest bin if the current backup is not in the
830           # current bin. However, if there is no larger bin, then don't
831           shift(@periods);
832           if (@periods) {
833             $cutoff = $now - $periods[0];
834           } else {
835             last;
836           }
837         }
838         # Throw away all backups older than the largest time period specified
839         if (!@periods) {
840           last;
841         }
842         push(@{$backup_bins{$periods[0]}}, $_);
843       }
844       foreach (keys(%backup_bins)) {
845         my $keep = $retention_map{$_}; # How many backups to keep
846         if ($backup_bins{$_}) {
847           my @backups = @{$backup_bins{$_}};
848           my $total = @backups;  # How many backups we have
849           # If we didn't specify how many to keep, keep them all
850           if ($keep == -1) { $keep = $total };
851           # If we have less backups than we should keep, keep them all
852           if ($total < $keep) { $keep = $total };
853           for (my $i = 1; $i <= $keep; $i++) {
854             my $idx = int(($i * $total) / $keep) - 1;
855             push(@{$must_save{$backups[$idx]}}, "retention policy - $_");
856           }
857         }
858       }
859       if ($DEBUG) {
860         print "    => Backup bins:\n";
861         foreach my $a (keys(%backup_bins)) {
862           print "      => $a\n";
863           foreach my $i (@{$backup_bins{$a}}) {
864             my $trans = $now - $i;
865             print "         => $i ($trans seconds old)";
866             if (exists($must_save{$i})) { print " => keep" };
867             print "\n";
868           }
869         }
870       }
871
872       # Look for dependencies
873       foreach (@backup_points) {
874         if(exists($info->{incremental}->{$_})) {
875           print "   => $_ depends on $info->{incremental}->{$_}->{depends}\n" if($DEBUG);
876           if (exists($must_save{$_})) {
877             push(@{$must_save{$info->{incremental}->{$_}->{depends}}},
878               "dependency");
879           }
880         }
881       }
882
883       my @removals = grep { !exists($must_save{$_}) } @backup_points;
884       if($DEBUG) {
885         my $tf = config_get($host, 'time_format');
886         print "    => Candidates for removal:\n";
887         foreach (@backup_points) {
888           print "      => ". strftime($tf, localtime($_));
889           print " ($_)";
890           print " [". (exists($info->{full}->{$_}) ? "full":"incremental") ."]";
891           if (exists($must_save{$_})) {
892             my $reason = join(", ", @{$must_save{$_}});
893             print " => keep ($reason)";
894           } else {
895             print " => remove";
896           }
897           print "\n";
898         }
899       }
900       foreach (@removals) {
901         my $efs = dir_encode($disk);
902         my $filename;
903         my $dataset;
904         if(exists($info->{full}->{$_}->{file})) {
905           $filename = $info->{full}->{$_}->{file};
906         } elsif(exists($info->{incremental}->{$_}->{file})) {
907           $filename = $info->{incremental}->{$_}->{file};
908         } elsif(exists($info->{full}->{$_}->{dataset})) {
909           $dataset = $info->{full}->{$_}->{dataset};
910         } elsif(exists($info->{incremental}->{$_}->{dataset})) {
911           $dataset = $info->{incremental}->{$_}->{dataset};
912         } else {
913           print "ERROR: We tried to expunge $host $disk [$_], but couldn't find it.\n";
914         }
915         print "    => expunging ${filename}${dataset}\n" if($DEBUG);
916         unless($NEUTERED) {
917           if ($filename) {
918             unlink($filename) || print "ERROR: unlink $filename: $?\n";
919           } elsif ($dataset) {
920             `__ZFS__ destroy $dataset`;
921             if ($?) {
922               print "ERROR: zfs destroy $dataset: $?\n";
923             }
924           }
925         }
926       }
927     }
928   }
929 }
930
931 sub __default_sort($$) { return $_[0] cmp $_[1]; }
932    
933 sub choose($$;$) {
934   my($name, $obj, $sort) = @_;
935   $sort ||= \&__default_sort;;
936   my @list;
937   my $hash;
938   if(ref $obj eq 'ARRAY') {
939     @list = sort { $sort->($a,$b); } (@$obj);
940     map { $hash->{$_} = $_; } @list;
941   }
942   elsif(ref $obj eq 'HASH') {
943     @list = sort { $sort->($a,$b); } (keys %$obj);
944     $hash = $obj;
945   }
946   else {
947     die "choose passed bad object: " . ref($obj) . "\n";
948   }
949   return $list[0] if(scalar(@list) == 1);
950   print "\n";
951   my $i = 1;
952   for (@list) {
953     printf " %3d) $hash->{$_}\n", $i++;
954   }
955   my $selection = 0;
956   while($selection !~ /^\d+$/ or
957         $selection < 1 or
958         $selection >= $i) {
959     print "$name: ";
960     chomp($selection = <>);
961   }
962   return $list[$selection - 1];
963 }
964
965 sub backup_chain($$) {
966   my ($info, $ts) = @_;
967   my @list;
968   push @list, $info->{full}->{$ts} if(exists($info->{full}->{$ts}));
969   if(exists($info->{incremental}->{$ts})) {
970     push @list, $info->{incremental}->{$ts};
971     push @list, backup_chain($info, $info->{incremental}->{$ts}->{depends});
972   }
973   return @list;
974 }
975
976 sub get_fs_from_mountpoint($) {
977     my ($mountpoint) = @_;
978     my $fs;
979     my $rv = open(ZFSLIST, "__ZFS__ list -t filesystem -H |");
980     die "Unable to determine zfs filesystem for $mountpoint" unless $rv;
981     while (<ZFSLIST>) {
982         my @F = split(' ');
983         if ($F[-1] eq $mountpoint) {
984             $fs = $F[0];
985             last;
986         }
987     }
988     close(ZFSLIST);
989     die "Unable to determine zfs filesystem for $mountpoint" unless $fs;
990     return $fs;
991 }
992
993 sub perform_restore() {
994   my (%source, %classmap);
995
996   foreach my $host (grep { $_ ne "default" && $conf{$_}->{"type"} ne "class"}
997       keys %conf) {
998     # If -h was specific, we will skip this host if the arg isn't
999     # an exact match or a pattern match
1000     if($HOST &&
1001        !(($HOST eq $host) ||
1002          ($HOST =~ /^\/(.*)\/$/ && $host =~ /$1/))) {
1003       next;
1004     }
1005
1006     foreach my $class (get_classes()) {
1007       if ($DEBUG) {
1008         if ($class) {
1009           print "=> Class: $class\n" if $class;
1010         } else {
1011           print "=> Class: (none)\n";
1012         }
1013       }
1014       my $store = get_store($host, $class);
1015       my $backup_info = scan_for_backups($store);
1016       foreach my $disk (sort keys %{$backup_info}) {
1017         my $info = $backup_info->{$disk};
1018         next unless(ref($info) eq 'HASH');
1019         next
1020           if($ZFS &&      # if the pattern was specified it could
1021             !($disk eq $ZFS ||        # be a specific match or a
1022               ($ZFS =~ /^\/(.+)\/$/ && $disk =~ /$1/))); # regex
1023         # We want to see this one
1024         my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
1025         my @source_points;
1026         foreach (@backup_points) {
1027           push @source_points, $_ if(!$TIMESTAMP || $TIMESTAMP == $_)
1028         }
1029         if(@source_points) {
1030           $source{$host}->{$disk} = \@source_points;
1031           $classmap{$host}->{$disk} = $class;
1032         }
1033       }
1034     }
1035   }
1036
1037   if(! keys %source) {
1038     print "No matching backups found\n";
1039     return;
1040   }
1041
1042   # Here goes the possibly interactive dialog
1043   my $host = choose("Restore from host",  [keys %source]);
1044   my $disk = choose("Restore from ZFS", [keys %{$source{$host}}]);
1045  
1046   # Times are special.  We build a human readable form and use a numerical
1047   # sort function instead of the default lexical one.
1048   my %times;
1049   my $tf = config_get($host, 'time_format');
1050   map { $times{$_} = strftime($tf, localtime($_)); } @{$source{$host}->{$disk}};
1051   my $timestamp = choose("Restore as of timestamp", \%times,
1052                          sub { $_[0] <=> $_[1]; });
1053
1054   my $store = get_store($host, $classmap{$host}->{$disk});
1055   my $backup_info = scan_for_backups($store);
1056   my @backup_list = reverse backup_chain($backup_info->{$disk}, $timestamp);
1057
1058   if(!$RESTORE_HOST) {
1059     print "Restore to host [$host]:";
1060     chomp(my $input = <>);
1061     $RESTORE_HOST = length($input) ? $input : $host;
1062   }
1063   if(!$RESTORE_ZFS) {
1064     print "Restore to zfs [$disk]:";
1065     chomp(my $input = <>);
1066     $RESTORE_ZFS = length($input) ? $input : $disk;
1067   }
1068
1069   # show intentions
1070   print "Going to restore:\n";
1071   print "\tfrom: $host\n";
1072   print "\tfrom: $disk\n";
1073   print "\t  at: $timestamp [" . strftime($tf, localtime($timestamp)) . "]\n";
1074   print "\t  to: $RESTORE_HOST\n";
1075   print "\t  to: $RESTORE_ZFS\n";
1076   print "\n";
1077
1078   foreach(@backup_list) {
1079     $_->{success} = zfs_restore_part($RESTORE_HOST, $RESTORE_ZFS, $_->{file}, $_->{dataset}, $_->{depends});
1080   }
1081 }
1082
1083 sub zfs_restore_part($$$$;$) {
1084   my ($host, $fs, $file, $dataset, $dep) = @_;
1085   unless ($file || $dataset) {
1086     print STDERR "=> No dataset or filename given to restore. Bailing out.";
1087     return 1;
1088   }
1089   my $ssh_config = config_get($host, 'ssh_config');
1090   $ssh_config = "-F $ssh_config" if($ssh_config);
1091   print "Using custom ssh config file: $ssh_config\n" if($DEBUG);
1092   my $command;
1093   if(exists($conf{$host})) {
1094     my $agent = config_get($host, 'agent');
1095     $command = "$agent -r -z $fs";
1096     $command .= " -b $dep" if($dep);
1097   }
1098   else {
1099     $command = "__ZFS__ recv $fs";
1100   }
1101   if ($file) {
1102     print " => piping $file to $command\n" if($DEBUG);
1103     print "gzip -dfc $file | ssh $ssh_config $host $command\n" if ($DEBUG && $NEUTERED);
1104   } elsif ($dataset) {
1105     print " => piping $dataset to $command using zfs send\n" if ($DEBUG);
1106     print "zfs send $dataset | ssh $ssh_config $host $command\n" if ($DEBUG && $NEUTERED);
1107   }
1108   unless($NEUTERED) {
1109     if ($file) {
1110       open(DUMP, "gzip -dfc $file |");
1111     } elsif ($dataset) {
1112       open(DUMP, "__ZFS__ send $dataset |");
1113     }
1114     eval {
1115       open(RECEIVER, "| ssh $ssh_config $host $command");
1116       my $buffer;
1117       while(my $len = sysread(DUMP, $buffer, $BLOCKSIZE)) {
1118         if(syswrite(RECEIVER, $buffer, $len) != $len) {
1119           die "$!";
1120         }
1121       }
1122     };
1123     close(DUMP);
1124     close(RECEIVER);
1125   }
1126   return $?;
1127 }
1128
1129 sub pretty_print_backup($$$) {
1130   my ($info, $host, $point) = @_;
1131   my $tf = config_get($host, 'time_format');
1132   print "\t" . strftime($tf, localtime($point)) . " [$point] ";
1133   if(exists($info->{full}->{$point})) {
1134     if ($info->{full}->{$point}->{file}) {
1135       my @st = stat($info->{full}->{$point}->{file});
1136       print "FULL " . pretty_size($st[7]);
1137       print "\n\tfile: $info->{full}->{$point}->{file}" if($SHOW_FILENAMES);
1138     } elsif ($info->{full}->{$point}->{dataset}) {
1139       print "FULL $info->{full}->{$point}->{pretty_size}";
1140       print "\n\tdataset: $info->{full}->{$point}->{dataset}"
1141         if($SHOW_FILENAMES);
1142     }
1143   } else {
1144     my @st = stat($info->{incremental}->{$point}->{file});
1145     print "INCR from [$info->{incremental}->{$point}->{depends}] " . pretty_size($st[7]);
1146     print "\n\tfile: $info->{incremental}->{$point}->{file}" if($SHOW_FILENAMES);
1147   }
1148   print "\n";
1149 }
1150
1151 sub show_backups($$) {
1152   my ($host, $diskpat) = @_;
1153   my (@files, @datasets, %classmap);
1154   my $tf = config_get($host, 'time_format');
1155   foreach my $class (get_classes()) {
1156     if ($DEBUG) {
1157       if ($class) {
1158         print "=> Class: $class\n" if $class;
1159       } else {
1160         print "=> Class: (none)\n";
1161       }
1162     }
1163     my $store = get_store($host, $class);
1164     my $backup_info = scan_for_backups($store);
1165     foreach my $disk (sort keys %{$backup_info}) {
1166       my $info = $backup_info->{$disk};
1167       next unless(ref($info) eq 'HASH');
1168       next
1169         if($diskpat &&      # if the pattern was specified it could
1170           !($disk eq $diskpat ||        # be a specific match or a
1171             ($diskpat =~ /^\/(.+)\/$/ && $disk =~ /$1/))); # regex
1172
1173       my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
1174       @backup_points = sort { $a <=> $b } @backup_points;
1175       @backup_points = (pop @backup_points) unless ($ARCHIVE || $SUMMARY_EXT || $SUMMARY_VIOLATORS);
1176
1177       # Quick short-circuit in the case of retention violation checks
1178       if($SUMMARY_VIOLATORS) {
1179         if(time() > $info->{last_full} + config_get($host, 'full_interval') ||
1180           time() > $info->{last_backup} + config_get($host, 'backup_interval')) {
1181           print "$host:$disk\n";
1182           pretty_print_backup($info, $host, $info->{last_full});
1183           # Only print the last backup if it isn't the same as the last full
1184           if ($info->{last_full} != $info->{last_backup}) {
1185               pretty_print_backup($info, $host, $info->{last_backup});
1186           }
1187         }
1188         next;
1189       }
1190
1191       # We want to see this one
1192       print "$host:$disk\n";
1193       next unless($SUMMARY || $SUMMARY_EXT || $ARCHIVE);
1194       if($SUMMARY_EXT) {
1195         print "\tLast Full: ". ($info->{last_full} ? strftime($tf, localtime($info->{last_full})) : "Never") . "\n";
1196         if($info->{last_full} < $info->{last_incremental}) {
1197           print "\tLast Incr: ". strftime($tf, localtime($info->{last_incremental})). "\n";
1198         }
1199       }
1200       foreach (@backup_points) {
1201         pretty_print_backup($info, $host, $_);
1202         if(exists($info->{full}->{$_}->{file})) {
1203           push @files, $info->{full}->{$_}->{file};
1204           $classmap{$info->{full}->{$_}->{file}} = $class;
1205         } elsif(exists($info->{incremental}->{$_}->{file})) {
1206           push @files, $info->{incremental}->{$_}->{file};
1207           $classmap{$info->{incremental}->{$_}->{file}} = $class;
1208         } elsif(exists($info->{full}->{$_}->{dataset})) {
1209           push @datasets, $info->{full}->{$_}->{dataset};
1210           $classmap{$info->{full}->{$_}->{dataset}} = $class;
1211         }
1212       }
1213       print "\n";
1214     }
1215   }
1216   if($ARCHIVE && (scalar(@files) || scalar(@datasets))) {
1217     print "\nAre you sure you would like to archive ".scalar(@files).
1218       " file(s) and ".scalar(@datasets)." dataset(s)? ";
1219     while(($_ = <>) !~ /(?:y|n|yes|no)$/i) {
1220       print "\nAre you sure you would like to archive ".scalar(@files).
1221         " file(s) and ".scalar(@datasets)." dataset(s)? ";
1222     }
1223     if(/^y/i) {
1224       if (@files) {
1225         my $archive = config_get($host, 'archive');
1226         $archive =~ s/%h/$host/g;
1227         if(! -d $archive) {
1228           mkdir $archive || die "Cannot mkdir($archive)\n";
1229         }
1230         foreach my $file (@files) {
1231           my $store = get_store($host, $classmap{$file});
1232           (my $afile = $file) =~ s/^$store/$archive/;
1233           move($file, $afile) || print "Error archiving $file: $!\n";
1234         }
1235       }
1236       if (@datasets) {
1237         my $archive = config_get($host, 'archive');
1238         (my $basearchive = $archive) =~ s/\/?%h//g;
1239         my $basearchivefs;
1240         eval {
1241           $basearchivefs = get_fs_from_mountpoint($basearchive);
1242         };
1243         die "Unable to find archive filesystem. The archive directory must be the root of a zfs filesystem to archive datasets." if $@;
1244         my $archivefs = "$basearchivefs/$host";
1245         `__ZFS__ create $archivefs`; # We don't care if this fails
1246         my %seen = ();
1247         foreach my $dataset (@datasets) {
1248           my $store = get_store($host, $classmap{$dataset});
1249           my $storefs = get_fs_from_mountpoint($store);
1250           $dataset =~ s/@.*$//; # Only rename filesystems, not snapshots
1251           next if $seen{$dataset}++; # Only rename a filesystem once
1252           (my $adataset = $dataset) =~ s/^$storefs/$archivefs/;
1253           `__ZFS__ rename $dataset $adataset`;
1254           if ($?) {
1255             print "Error archiving $dataset\n";
1256           }
1257         }
1258       }
1259     }
1260   }
1261 }
1262
1263 sub plan_and_run($$) {
1264   my ($host, $diskpat) = @_;
1265   my $store;
1266   my $ssh_config = config_get($host, 'ssh_config');
1267   $ssh_config = "-F $ssh_config" if($ssh_config);
1268   my %suppress;
1269   print "Planning '$host'\n" if($DEBUG);
1270   my $agent = config_get($host, 'agent');
1271   my $took_action = 1;
1272   while($took_action) {
1273     $took_action = 0;
1274     my @disklist;
1275
1276     # We need a lock for the listing.
1277     return unless(lock($host, ".list"));
1278
1279     # Get list of zfs filesystems from the agent
1280     open(SILENT, ">&", \*STDERR);
1281     close(STDERR);
1282     my $rv = open(ZFSLIST, "ssh $ssh_config $host $agent -l |");
1283     open(STDERR, ">&", \*SILENT);
1284     close(SILENT);
1285     next unless $rv;
1286     @disklist = grep { chomp } (<ZFSLIST>);
1287     close(ZFSLIST);
1288
1289     if ($DEBUG) {
1290       print " => Filesystems for $host (zetaback_agent -l output)\n";
1291       foreach my $diskline (@disklist) {
1292         print "    $diskline\n";
1293       }
1294     }
1295
1296     foreach my $diskline (@disklist) {
1297       chomp($diskline);
1298       next unless($diskline =~ /^(\S+) \[([^\]]*)\](?: {([^}]*)})?/);
1299       my $diskname = $1;
1300       my %snaps;
1301       map { $snaps{$_} = 1 } (split(/,/, $2));
1302       my $class = $3;
1303  
1304       # We've just done this.
1305       next if($suppress{"$host:$diskname"});
1306       # If we are being selective (via -z) now is the time.
1307       next
1308         if($diskpat &&          # if the pattern was specified it could
1309            !($diskname eq $diskpat ||        # be a specific match or a
1310              ($diskpat =~ /^\/(.+)\/$/ && $diskname =~ /$1/))); # regex
1311  
1312       $store = get_store($host, $class);
1313       if ($DEBUG) {
1314         if ($class) {
1315             print STDERR "=> Class is $class\n";
1316         } else {
1317             print STDERR "=> No/default class\n";
1318         }
1319       }
1320       print " => Scanning '$store' for old backups of '$diskname'.\n" if($DEBUG);
1321
1322       # Make directory on demand
1323       my $backup_info = scan_for_backups($store);
1324       # That gave us info on all backups, we just want this disk
1325       $backup_info = $backup_info->{$diskname} || {};
1326  
1327       # Should we do a backup?
1328       my $backup_type = 'no';
1329       if(time() > $backup_info->{last_backup} + config_get($host,
1330           'backup_interval', $class)) {
1331         $backup_type = 'incremental';
1332       }
1333       if(time() > $backup_info->{last_full} + config_get($host,
1334           'full_interval', $class)) {
1335         $backup_type = 'full';
1336       }
1337       # If we want an incremental, but have no full, then we need to upgrade to full
1338       if($backup_type eq 'incremental') {
1339         my $have_full_locally = 0;
1340         # For each local full backup, see if the full backup still exists on the other end.
1341         foreach (keys %{$backup_info->{'full'}}) {
1342           $have_full_locally = 1 if(exists($snaps{'__zb_full_' . $_}));
1343         }
1344         $backup_type = 'full' unless($have_full_locally);
1345       }
1346       $backup_type = 'full' if($FORCE_FULL);
1347       $backup_type = 'incremental' if($FORCE_INC);
1348       $backup_type = 'dataset' if(config_get($host, 'dataset_backup', $class)
1349         eq 1 && $backup_type ne 'no');
1350
1351       print " => doing $backup_type backup\n" if($DEBUG);
1352       # We need to drop a __zb_base snap or a __zb_incr snap before we proceed
1353       unless($NEUTERED || $backup_type eq 'no') {
1354         # attempt to lock this action, if it fails, skip -- someone else is working it.
1355         next unless(lock($host, dir_encode($diskname), 1));
1356         unlock($host, '.list');
1357
1358         if($backup_type eq 'full') {
1359           eval { zfs_full_backup($host, $diskname, $store); };
1360           if ($@) {
1361             chomp(my $err = $@);
1362             print " => failure $err\n";
1363           }
1364           else {
1365             # Unless there was an error backing up, remove all the other full snaps
1366             foreach (keys %snaps) {
1367               zfs_remove_snap($host, $diskname, $_) if(/^__zb_full_(\d+)/)
1368             }
1369           }
1370           $took_action = 1;
1371         }
1372         if($backup_type eq 'incremental') {
1373           eval {
1374             zfs_remove_snap($host, $diskname, '__zb_incr') if($snaps{'__zb_incr'});
1375             # Find the newest full from which to do an incremental (NOTE: reverse numeric sort)
1376             my @fulls = sort { $b <=> $a } (keys %{$backup_info->{'full'}});
1377             zfs_incremental_backup($host, $diskname, $fulls[0], $store);
1378           };
1379           if ($@) {
1380             chomp(my $err = $@);
1381             print " => failure $err\n";
1382           }
1383           else {
1384             $took_action = 1;
1385           }
1386         }
1387         if($backup_type eq 'dataset') {
1388           my @backups = sort { $b <=> $a } (keys %{$backup_info->{'full'}});
1389           eval { zfs_dataset_backup($host, $diskname, $backups[0], $store); };
1390           if ($@) {
1391             chomp(my $err = $@);
1392             print " => failure $err\n";
1393           }
1394           else {
1395             # Unless there was an error backing up, remove all the other dset snaps
1396             foreach (keys %snaps) {
1397               zfs_remove_snap($host, $diskname, $_) if(/^__zb_dset_(\d+)/)
1398             }
1399           }
1400           $took_action = 1;
1401         }
1402         unlock($host, dir_encode($diskname), 1);
1403       }
1404       $suppress{"$host:$diskname"} = 1;
1405       last if($took_action);
1406     }
1407     unlock($host, '.list');
1408   }
1409 }
1410
1411 if($RESTORE) {
1412   perform_restore();
1413 }
1414 else {
1415   foreach my $host (grep { $_ ne "default" && $conf{$_}->{"type"} ne "class"}
1416       keys %conf) {
1417     # If -h was specific, we will skip this host if the arg isn't
1418     # an exact match or a pattern match
1419     if($HOST &&
1420        !(($HOST eq $host) ||
1421          ($HOST =~ /^\/(.*)\/$/ && $host =~ /$1/))) {
1422       next;
1423     }
1424
1425     # Skip if the host is marked as 'offline' and we are not listing backups
1426     if (config_get($host, 'offline') == 1 &&
1427         !$LIST && !$SUMMARY && !$SUMMARY_EXT && !$ARCHIVE) {
1428       next;
1429     }
1430
1431     if($LIST || $SUMMARY || $SUMMARY_EXT || $SUMMARY_VIOLATORS || $ARCHIVE) {
1432       show_backups($host, $ZFS);
1433     }
1434     if($BACKUP) {
1435       plan_and_run($host, $ZFS);
1436     }
1437     if($EXPUNGE) {
1438       perform_retention($host);
1439     }
1440   }
1441 }
1442
1443 exit 0;
1444
1445 =pod
1446
1447 =head1 FILES
1448
1449 =over
1450
1451 =item zetaback.conf
1452
1453 The main zetaback configuration file.  The location of the file can be
1454 specified on the command line with the -c flag.  The prefix of this
1455 file may also be specified as an argument to the configure script.
1456
1457 =back
1458
1459 =head1 SEE ALSO
1460
1461 zetaback_agent(1)
1462
1463 =cut
Note: See TracBrowser for help on using the browser.