root/zetaback.in

Revision 30cf15d81467e37c4f29bf704e93ca1d350f6277, 43.9 kB (checked in by Mark Harrison <mark@omniti.com>, 4 years ago)

Change precedence of configuration stanzas

  • Property mode set to 100755
Line 
1 #!/usr/bin/perl
2 # vim: sts=2 sw=2 ts=8 et
3
4 # Copyright (c) 2007 OmniTI Computer Consulting, Inc. All rights reserved.
5 # For information on licensing see:
6 #   https://labs.omniti.com/zetaback/trunk/LICENSE
7
8 use strict;
9 use Getopt::Long;
10 use MIME::Base64;
11 use POSIX qw/strftime/;
12 use Fcntl qw/:flock/;
13 use File::Copy;
14 use IO::File;
15 use Pod::Usage;
16
17 use vars qw/%conf %locks $version_string
18             $PREFIX $CONF $BLOCKSIZE $DEBUG $HOST $BACKUP
19             $RESTORE $RESTORE_HOST $RESTORE_ZFS $TIMESTAMP
20             $LIST $SUMMARY $SUMMARY_EXT $SUMMARY_VIOLATORS
21             $FORCE_FULL $FORCE_INC $EXPUNGE $NEUTERED $ZFS
22             $SHOW_FILENAMES $ARCHIVE $VERSION $HELP/;
# Derive a human-readable version string from the svn URL keyword and
# locate the configuration file under the install prefix.  The q$URL$ and
# __PREFIX__ tokens are substituted at checkout/install time.
23 $version_string = q$URL$;
24 $version_string =~ s#/branches/#/b#;
25 $version_string =~ s#^.*/([^/]+)/[^/]+$#$1#;
26 $PREFIX = q^__PREFIX__^;
27 $CONF = qq^$PREFIX/etc/zetaback.conf^;
# I/O chunk size (64KB) used when copying backup streams.
28 $BLOCKSIZE = 1024*64;
29
# Built-in defaults; any of these may be overridden per-host (or per-class)
# in zetaback.conf -- see the CONFIGURATION POD section below.
30 $conf{'default'}->{'time_format'} = "%Y-%m-%d %H:%M:%S";
31 $conf{'default'}->{'retention'} = 14 * 86400;
32 $conf{'default'}->{'compressionlevel'} = 1;
33 $conf{'default'}->{'dataset_backup'} = 0;
34
35 =pod
36
37 =head1 NAME
38
39 zetaback - perform backup, restore and retention policies for ZFS backups.
40
41 =head1 SYNOPSIS
42
43   zetaback -v
44
45   zetaback [-l | -s | -sx | -sv] [--files] [-c conf] [-d] [-h host] [-z zfs]
46
47   zetaback -a [-c conf] [-d] [-h host] [-z zfs]
48
49   zetaback -b [-ff] [-fi] [-x] [-c conf] [-d] [-n] [-h host] [-z zfs]
50
51   zetaback -x [-b] [-c conf] [-d] [-n] [-h host] [-z zfs]
52
53   zetaback -r [-c conf] [-d] [-n] [-h host] [-z zfs] [-t timestamp]
54               [-rhost host] [-rzfs fs]
55
56 =cut
57
# Parse command-line flags into the action/filter globals declared above.
58 GetOptions(
59   "h=s"     => \$HOST,
60   "z=s"     => \$ZFS,
61   "c=s"     => \$CONF,
62   "a"       => \$ARCHIVE,
63   "b"       => \$BACKUP,
64   "l"       => \$LIST,
65   "s"       => \$SUMMARY,
66   "sx"      => \$SUMMARY_EXT,
67   "sv"      => \$SUMMARY_VIOLATORS,
68   "r"       => \$RESTORE,
69   "t=i"     => \$TIMESTAMP,
70   "rhost=s" => \$RESTORE_HOST,
71   "rzfs=s"  => \$RESTORE_ZFS,
72   "d"       => \$DEBUG,
73   "n"       => \$NEUTERED,
74   "x"       => \$EXPUNGE,
75   "v"       => \$VERSION,
76   "ff"      => \$FORCE_FULL,
77   "fi"      => \$FORCE_INC,
78   "files"   => \$SHOW_FILENAMES,
79 );
80
# Exactly one primary action may be requested per invocation.  -b and -x
# are counted as a single action because they may be combined (backup,
# then expunge).  Passing both -ff and -fi alongside -b is contradictory,
# so it bumps the count past 1 to force the usage message below.
81 # actions allowed together 'x' and 'b' all others are exclusive:
82 my $actions = 0;
83 $actions++ if($ARCHIVE);
84 $actions++ if($BACKUP || $EXPUNGE);
85 $actions++ if($RESTORE);
86 $actions++ if($LIST);
87 $actions++ if($SUMMARY);
88 $actions++ if($SUMMARY_EXT);
89 $actions++ if($SUMMARY_VIOLATORS);
90 $actions++ if($VERSION);
91 $actions++ if($BACKUP && $FORCE_FULL && $FORCE_INC);
92 if($actions != 1) {
93   pod2usage({ -verbose => 0 });
94   exit -1;
95 }
96
97 =pod
98
99 =head1 DESCRIPTION
100
101 The B<zetaback> program orchestrates the backup (either full or
102 incremental) of remote ZFS filesystems to a local store.  It handles
103 frequency requirements for both full and incremental backups as well
104 as retention policies.  In addition to backups, the B<zetaback> tool
105 allows for the restore of any backup to a specified host and zfs
106 filesystem.
107
108 =head1 OPTIONS
109
110 The non-optional action command line arguments define the invocation purpose
111 of B<zetaback>.  All other arguments are optional and refine the target
112 of the action specified.
113
114 =head2 Generic Options
115
116 The following arguments have the same meaning over several actions:
117
118 =over
119
120 =item -c <conf>
121
122 Use the specified file as the configuration file.  The default file, if
123 none is specified is /usr/local/etc/zetaback.conf.  The prefix of this
124 file may also be specified as an argument to the configure script.
125
126 =item -d
127
128 Enable debugging output.
129
130 =item -n
131
132 Don't actually perform any remote commands or expunging.  This is useful with
133 the -d argument to ascertain what would be done if the command was actually
134 executed.
135
136 =item -t <timestamp>
137
138 Used during the restore process to specify a backup image from the desired
139 point in time.  If omitted, the command becomes interactive.  This timestamp
140 is a UNIX timestamp and is shown in the output of the -s and -sx actions.
141
142 =item -rhost <host>
143
144 Specify the remote host that is the target for a restore operation.  If
145 omitted the command becomes interactive.
146
147 =item -rzfs <zfs>
148
149 Specify the remote ZFS filesystem that is the target for a restore
150 operation.  If omitted the command becomes interactive.
151
152 =item -h <host>
153
154 Filters the operation to the host specified.  If <host> is of the form
155 /pattern/, it matches 'pattern' as a perl regular expression against available
156 hosts.  If omitted, no limit is enforced and all hosts are used for the action.
157
158 =item -z <zfs>
159
160 Filters the operation to the zfs filesystem specified.  If <zfs> is of the
161 form /pattern/, it matches 'pattern' as a perl regular expression against
162 available zfs filesystems.  If omitted, no filter is enforced and all zfs
163 filesystems are used for the action.
164
165 =back
166
167 =head2 Actions
168
169 =over
170
171 =item -v
172
173 Show the version.
174
175 =item -l
176
177 Show a brief listing of available backups.
178
179 =item -s
180
181 Like -l, -s will show a list of backups but provides additional information
182 about the backups including timestamp, type (full or incremental) and the
183 size on disk.
184
185 =item -sx
186
187 Shows an extended summary.  In addition to the output provided by the -s
188 action, the -sx action will show detail for each available backup.  For
189 full backups, the detail will include any more recent full backups, if
190 they exist.  For incremental backups, the detail will include any
191 incremental backups that are more recent than the last full backup.
192
193 =item -sv
194
195 Display all backups in the current store that violate the configured
196 backup policy. This is where the most recent full backup is older than
197 full_interval seconds ago, or the most recent incremental backup is older
198 than backup_interval seconds ago.
199
200 =item --files
201
202 Display the on-disk file corresponding to each backup named in the output.
203 This is useful with the -sv flag to name violating files.  Often times,
204 violators are filesystems that have been removed on the host machines and
205 zetaback can no longer back them up.  Be very careful if you choose to
206 automate the removal of such backups as filesystems that would be backed up
207 by the next regular zetaback run will often show up as violators.
208
209 =item -a
210
211 Performs an archive.  This option will look at all eligible backup points
212 (as restricted by -z and -h) and move those to the configured archive
213 directory.  The recommended use is to first issue -sx --files then
214 carefully review available backup points and prune those that are
215 unneeded.  Then invoke with -a to move only the remaining "desired"
216 backup points into the archives.  Archived backups do not appear in any
217 listings or in the list of policy violators generated by the -sv option.
218 In effect, they are no longer "visible" to zetaback.
219
220 =item -b
221
222 Performs a backup.  This option will investigate all eligible hosts, query
223 the available filesystems from the remote agent and determine if any such
224 filesystems require a new full or incremental backup to be taken.  This
225 option may be combined with the -x option (to clean up afterwards.)
226
227 =item -ff
228
229 Forces a full backup to be taken on each filesystem encountered.  This is
230 used in combination with -b.  It is recommended to use this option only when
231 targeting specific filesystems (via the -h and -z options.)  Forcing a full
232 backup across all machines will cause staggered backups to coalesce and
233 could cause performance issues.
234
235 =item -fi
236
237 Forces an incremental backup to be taken on each filesystem encountered. 
238 This is used in combination with -b.  It is recommended to use this option
239 only when targeting specific filesystems (via the -h and -z options.)  Forcing
240 an incremental backup across all machines will cause staggered backups
241 to coalesce and could cause performance issues.
242
243 =item -x
244
245 Perform an expunge.  This option will determine which, if any, of the local
246 backups may be deleted given the retention policy specified in the
247 configuration.
248
249 =item -r
250
251 Perform a restore.  This option will operate on the specified backup and
252 restore it to the ZFS filesystem specified with -rzfs on the host specified
253 with the -rhost option.  The -h, -z and -t options may be used to filter
254 the source backup list.  If the filtered list contains more than one
255 source backup image, the command will act interactively.  If the -rhost
256 and -rzfs options are not specified, the command will act interactively.
257
258 =back
259
260 =cut
261
# -v: report the svn-derived version string and exit before touching the
# configuration file at all.
262 if($VERSION) {
263   print "zetaback: $version_string\n";
264   exit 0;
265 }
266
267 =pod
268
269 =head1 CONFIGURATION
270
271 The zetaback configuration file consists of a default stanza, containing
272 settings that can be overridden on a per-host basis.  A stanza begins
273 either with the string 'default', or a fully-qualified hostname, with
274 settings enclosed in braces ({}).  Single-line comments begin with a hash
275 ('#'), and whitespace is ignored, so feel free to indent for better
276 readability.  Every host to be backed up must have a host stanza in the
277 configuration file.
278
279 =head2 Settings
280
281 The following settings are valid in both the default and host scopes:
282
283 =over
284
285 =item store
286
287 The base directory under which to keep backups.  An interpolated variable
288 '%h' can be used, which expands to the hostname.  There is no default for
289 this setting.
290
291 =item archive
292
293 The base directory under which archives are stored.  The format is the same
294 as the store setting.  This is the destination to which files are relocated
295 when issuing an archive action (-a).
296
297 =item agent
298
299 The location of the zetaback_agent binary on the host.  There is no default
300 for this setting.
301
302 =item time_format
303
304 All timestamps within zetaback are in UNIX timestamp format.  This setting
305 provides a string for formatting all timestamps on output.  The sequences
306 available are identical to those in strftime(3).  If not specified, the
307 default is '%Y-%m-%d %H:%M:%S'.
308
309 =item backup_interval
310
311 The frequency (in seconds) at which to perform incremental backups.  An
312 incremental backup will be performed if the current time is more than
313 backup_interval since the last incremental backup.  If there is no full backup
314 for a particular filesystem, then a full backup is performed.  There is no
315 default for this setting.
316
317 =item full_interval
318
319 The frequency (in seconds) at which to perform full backups.  A full backup will
320 be performed if the current time is more than full_interval since the last full
321 backup.
322
323 =item retention
324
325 The retention time (in seconds) for backups.  This can be a simple number, in
326 which case all backups older than this will be expunged.
327
328 The retention specification can also be more complex, and consist of pairs of
329 values separated by a comma. The first value is a time period in seconds, and
330 the second value is how many backups should be retained within that period.
331 For example:
332
333 retention = 3600,4;86400,11
334
335 This will keep up to 4 backups for the first hour, and an additional 11
336 backups over 24 hours. The times do not stack. In other words, the 11 backups
337 would be kept during the period from 1 hour old to 24 hours old, or one every
338 2 hours.
339
340 Any backups older than the largest time given are deleted. In the above
341 example, all backups older than 24 hours are deleted.
342
343 If a second number is not specified, then all backups are kept within that
344 period.
345
346 Note: Full backups are never deleted if they are depended upon by an
347 incremental. In addition, the most recent backup is never deleted, regardless
348 of how old it is.
349
350 This value defaults to (14 * 86400), or two weeks.
351
352 =item compressionlevel
353
354 Compress files using gzip at the specified compression level. 0 means no
355 compression. Accepted values are 1-9. Defaults to 1 (fastest/minimal
356 compression.)
357
358 =item ssh_config
359
360 Full path to an alternate ssh client config.  This is useful for specifying a
361 less secure but faster cipher for some hosts, or using a different private
362 key.  There is no default for this setting.
363
364 =item dataset_backup
365
366 By default zetaback backs zfs filesystems up to files. This option lets you
367 specify that the backup be stored as a zfs dataset on the backup host.
368
369 =item offline
370
371 Setting this option to 1 for a host will mark it as being 'offline'. Hosts
372 that are marked offline will not be backed up, will not have any old backups
373 expunged and will not be included in the list of policy violators. However,
374 the host will still be shown when listing backups and archiving.
375
376 =back
377
378 =head1 CONFIGURATION EXAMPLES
379
380 =head2 Uniform hosts
381
382 This config results in backups stored in /var/spool/zfs_backups, with a
383 subdirectory for each host.  Incremental backups will be performed
384 approximately once per day, assuming zetaback is run hourly.  Full backups
385 will be done once per week.  Time format and retention are default.
386
387   default {
388     store = /var/spool/zfs_backups/%h
389     agent = /usr/local/bin/zetaback_agent
390     backup_interval = 83000
391     full_interval = 604800
392   }
393
394   host1 {}
395
396   host2 {}
397
398 =head2 Non-uniform hosts
399
400 Here, host1's and host2's agents are found in different places, and host2's
401 backups should be stored in a different path.
402
403   default {
404     store = /var/spool/zfs_backups/%h
405     agent = /usr/local/bin/zetaback_agent
406     backup_interval = 83000
407     full_interval = 604800
408   }
409
410   host1 {
411     agent = /opt/local/bin/zetaback_agent
412   }
413
414   host2 {
415     store = /var/spool/alt_backups/%h
416     agent = /www/bin/zetaback_agent
417   }
418
419 =cut
420
421 # Make the parser more formal:
422 # config => stanza*
423 # stanza => string { kvp* }
424 # kvp    => string = string
# A "string" is either a double-quoted token (allowing \\ and \" escapes)
# or a bare run of non-whitespace.  The three precompiled regexes below
# implement that grammar and are consumed by parse_config().
425 my $str_re = qr/(?:"(?:\\\\|\\"|[^"])*"|\S+)/;
426 my $kvp_re = qr/($str_re)\s*=\s*($str_re)/;
427 my $stanza_re = qr/($str_re)\s*\{((?:\s*$kvp_re)*)\s*\}/;
428
sub parse_config() {
  # Slurp the configuration file named by the global $CONF and populate the
  # global %conf: one entry per stanza, keyed by scope name ('default', a
  # hostname, or a class name), each holding a key => value hash.
  # Keys are stored lower-cased; surrounding double quotes are stripped.
  # Dies if the file cannot be opened.
  local $/ = undef;                     # slurp mode; restored on scope exit
  # FIX: three-argument open with a lexical handle (the old bareword CONF
  # glob was global and the 2-arg form is mode-injectable); include $! so
  # the failure reason is reported.
  open(my $cfh, '<', $CONF) || die "Unable to open config file: $CONF: $!";
  my $file = <$cfh>;
  close($cfh);
  # Rip comments
  $file =~ s/^\s*#.*$//mg;
  while($file =~ m/$stanza_re/gm) {
    my $scope = $1;
    my $filepart = $2;
    $scope =~ s/^"(.*)"$/$1/;           # strip surrounding quotes
    $conf{$scope} ||= {};
    while($filepart =~ m/$kvp_re/gm) {
      my $key = $1;
      my $value = $2;
      $key =~ s/^"(.*)"$/$1/;
      $value =~ s/^"(.*)"$/$1/;
      $conf{$scope}->{lc($key)} = $value;
    }
  }
}
sub config_get($$;$) {
  # Look up a configuration key with precedence: class (if given), then
  # host, then the 'default' stanza.  Returns undef if no scope defines it.
  # FIX: the old implementation chained with '||', which skipped any scope
  # whose value was legitimately false (e.g. a per-host compressionlevel of
  # 0, or dataset_backup = 0) and silently fell through to the default.
  # Use defined-ness, not truth, to decide which scope wins.
  my ($host, $key, $class) = @_;
  if (defined($class) && defined($conf{$class}->{$key})) {
    return $conf{$class}->{$key};
  }
  if (defined($conf{$host}->{$key})) {
    return $conf{$host}->{$key};
  }
  return $conf{'default'}->{$key};
}
461
sub get_store($;$) {
  # Resolve the backup store directory for a host (optionally consulting a
  # class scope), expanding the %h placeholder to the hostname, and ensure
  # the directory exists.  (Fixes a stray double semicolon.)
  my ($host, $class) = @_;
  my $store = config_get($host, 'store', $class);
  $store =~ s/%h/$host/g;
  # NOTE(review): mkdir failure is deliberately ignored (as before); callers
  # fail later with a clearer error if the path is truly unusable.
  mkdir $store if(! -d $store);
  return $store;
}
469
sub fs_encode($) {
  # Encode "fs[@snapshot]" into a zfs-component-safe token.  The filesystem
  # part is base64 encoded and the base64 characters '/', '=' and '+' are
  # mapped to '_', '-' and '.' respectively; any snapshot suffix is carried
  # through unchanged.
  my ($name) = @_;
  my ($fs, $snap) = split('@', $name);
  my $enc = encode_base64($fs, '');
  $enc =~ tr{/=+}{_\-.};
  $enc .= "\@$snap" if defined $snap;
  return $enc;
}
sub fs_decode($) {
  # Inverse of fs_encode for the filesystem portion: map '_', '-' and '.'
  # back to the base64 characters '/', '=' and '+', then decode.
  my ($enc) = @_;
  (my $b64 = $enc) =~ tr{_\-.}{/=+};
  return decode_base64($b64);
}
sub dir_encode($) {
  # Encode a zfs filesystem name into a filename-safe token: base64, with
  # '/' (legal in base64, but a path separator on disk) mapped to '_'.
  my ($d) = @_;
  my $e = encode_base64($d, '');
  # FIX: replace ALL slashes, not just the first.  Without /g, any name
  # whose base64 text contained more than one '/' produced a backup
  # filename with an embedded path separator.
  $e =~ s/\//_/g;
  return $e;
}
sub dir_decode($) {
  # Inverse of dir_encode.  '_' never occurs in standard base64 output, so
  # every '_' can safely be mapped back to '/'.
  my ($e) = @_;
  # FIX: matching /g change to the encoder above.
  $e =~ s/_/\//g;
  return decode_base64($e);
}
sub pretty_size($) {
  # Render a byte count as a human-readable string using binary units.
  # Values of exactly 1024/1048576/1073741824 fall through to the smaller
  # unit (strict '>' comparisons, matching historical output).
  my ($bytes) = @_;
  my $gb = 1024 ** 3;
  my $mb = 1024 ** 2;
  my $kb = 1024;
  return sprintf("%0.2f Gb", $bytes / ($gb)) if $bytes > $gb;
  return sprintf("%0.2f Mb", $bytes / ($mb)) if $bytes > $mb;
  return sprintf("%0.2f Kb", $bytes / ($kb)) if $bytes > $kb;
  return "$bytes b";
}
sub lock($;$$) {
  # Acquire (or confirm) an exclusive flock for $host, using $file (default
  # 'master.lock') inside the host's store directory.  With $nowait set the
  # flock is non-blocking.  Returns 1 on success (or if already held by this
  # process), 0 on failure.
  my ($host, $file, $nowait) = @_;
  print "Acquiring lock for $host:$file\n" if($DEBUG);
  $file ||= 'master.lock';
  my $store = get_store($host); # Don't take classes into account - not needed
  return 1 if(exists($locks{"$host:$file"}));
  # FIX: use a lexical filehandle.  The old bareword LOCK glob was shared by
  # every call, so opening a second lock implicitly closed the first handle
  # and thereby dropped its flock.
  open(my $lockfh, '+>>', "$store/$file") or return 0;
  unless(flock($lockfh, LOCK_EX | ($nowait ? LOCK_NB : 0))) {
    close($lockfh);
    return 0;
  }
  $locks{"$host:$file"} = $lockfh;
  return 1;
}
sub unlock($;$$) {
  # Release a lock previously taken by lock().  With $remove set, the lock
  # file itself is unlinked.  Returns 1 on success, 0 if no matching lock is
  # held by this process.
  my ($host, $file, $remove) = @_;
  print "Releasing lock for $host:$file\n" if($DEBUG);
  $file ||= 'master.lock';
  my $store = get_store($host); # Don't take classes into account - not needed
  return 0 unless(exists($locks{"$host:$file"}));
  # FIX: lock() stores handles under "$host:$file", but this looked up
  # $locks{$file} (always missing), so the flock was never actually
  # released.  Also delete the entry so a later lock() can re-acquire.
  my $lockfh = delete $locks{"$host:$file"};
  unlink("$store/$file") if($remove);
  flock($lockfh, LOCK_UN);
  close($lockfh);
  return 1;
}
sub scan_for_backups($) {
  # Catalog the backups stored under $dir.  Returns a hashref keyed by
  # filesystem name; each entry carries {full} and {incremental} maps keyed
  # by timestamp plus last_full / last_incremental / last_backup high-water
  # marks.  Top-level last_* keys are initialized to 0 for empty stores.
  my ($dir) = @_;
  my %info = ();
  $info{last_full} = $info{last_incremental} = $info{last_backup} = 0;
  # Look for standard file based backups first:
  #   "<ts>.<encoded-fs>.full" and "<ts>.<encoded-fs>.incremental.<base-ts>"
  opendir(my $dh, $dir) || return \%info;
  foreach my $file (readdir($dh)) {
    if($file =~ /^(\d+)\.([^\.]+)\.full$/) {
      my $whence = $1;
      my $fs = dir_decode($2);
      $info{$fs}->{full}->{$whence}->{'file'} = "$dir/$file";
      # Initialize per-fs counters so the comparisons below don't warn.
      $info{$fs}->{last_full} ||= 0;
      $info{$fs}->{last_incremental} ||= 0;
      $info{$fs}->{last_full} = $whence if($whence > $info{$fs}->{last_full});
      $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
                                     $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
    }
    # FIX: the dots around "incremental" were previously unescaped and
    # matched any character.
    elsif($file =~ /^(\d+)\.([^\.]+)\.incremental\.(\d+)$/) {
      my $whence = $1;
      my $fs = dir_decode($2);
      $info{$fs}->{incremental}->{$whence}->{'depends'} = $3;
      $info{$fs}->{incremental}->{$whence}->{'file'} = "$dir/$file";
      $info{$fs}->{last_full} ||= 0;
      $info{$fs}->{last_incremental} ||= 0;
      $info{$fs}->{last_incremental} = $whence if($whence > $info{$fs}->{last_incremental});
      $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
                                     $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
    }
  }
  closedir($dh);
  # Now look for zfs based (dataset) backups: snapshots under the store fs.
  my $storefs;
  eval {
    $storefs = get_fs_from_mountpoint($dir);
  };
  return \%info if ($@);   # store is not itself a zfs filesystem
  # __ZFS__ is replaced with the zfs binary path at install time.
  my $rv = open(my $zfslist, "__ZFS__ list -H -r -t snapshot $storefs |");
  return \%info unless $rv;
  while (<$zfslist>) {
      my @F = split(' ');
      my ($rawfs, $snap) = split('@', $F[0]);
      my ($whence) = ($snap =~ /(\d+)/);
      next unless $whence;
      my @fsparts = split('/', $rawfs);
      my $fs = fs_decode($fsparts[-1]);
      # Treat a dataset backup as a full backup from the point of view of the
      # backup lists
      $info{$fs}->{full}->{$whence}->{'snapshot'} = $snap;
      $info{$fs}->{full}->{$whence}->{'dataset'} = "$rawfs\@$snap";
      # Note - this field isn't set for file backups - we probably should do
      # this
      $info{$fs}->{full}->{$whence}->{'pretty_size'} = "$F[1]";
      $info{$fs}->{last_full} ||= 0;
      $info{$fs}->{last_backup} ||= 0;
      $info{$fs}->{last_full} = $whence if ($whence >
          $info{$fs}->{last_full});
      $info{$fs}->{last_backup} = $whence if ($whence >
          $info{$fs}->{last_backup});
  }
  close($zfslist);

  return \%info;
}
596
# Load the configuration into %conf before any of the action code below
# consults it.
597 parse_config();
598
sub zetaback_log($$;@) {
  # Append a timestamped, printf-formatted message to the host's configured
  # logfile; fall back to STDERR when no logfile is set or it cannot be
  # opened for append.
  my ($host, $mess, @args) = @_;
  my $tf = config_get($host, 'time_format');
  my $logfile = config_get($host, 'logfile');
  my $out;
  $out = IO::File->new(">>$logfile") if defined($logfile);
  $out = IO::File->new(">&STDERR") unless $out;
  printf $out "%s: $mess", strftime($tf, localtime(time)), @args;
  $out->close();
}
611
sub zfs_remove_snap($$$) {
  # Ask the remote agent on $host to destroy snapshot $snap of $fs.
  # No-op when $snap is empty.
  my ($host, $fs, $snap) = @_;
  return unless($snap);
  my $agent = config_get($host, 'agent');
  my $ssh_config = config_get($host, 'ssh_config');
  if($ssh_config) {
    $ssh_config = "-F $ssh_config";
    # FIX: only announce a custom config when one is actually set; the old
    # code printed (and interpolated undef) even without one.
    print "Using custom ssh config file: $ssh_config\n" if($DEBUG);
  } else {
    $ssh_config = '';   # avoid interpolating undef into the command below
  }
  print "Dropping $snap on $fs\n" if($DEBUG);
  `ssh $ssh_config $host $agent -z $fs -d $snap`;
}
622
# Lots of args.. internally called by the zfs_*_backup wrappers.
sub zfs_do_backup($$$$$$;$) {
  # $type: 'f' = full file backup, 'i' = incremental file backup,
  #        's' = dataset backup (streamed into zfs recv).
  # $point: timestamp of this backup; $dumpname: target file/snapshot name;
  # $base: base timestamp for incrementals.  Dies (after logging) on error.
  my ($host, $fs, $type, $point, $store, $dumpname, $base) = @_;
  my ($storefs, $encodedname);
  my $agent = config_get($host, 'agent');
  my $ssh_config = config_get($host, 'ssh_config');
  if($ssh_config) {
    $ssh_config = "-F $ssh_config";
    print "Using custom ssh config file: $ssh_config\n" if($DEBUG);
  } else {
    $ssh_config = '';   # avoid split/interpolation warnings on undef
  }

  # compression is meaningless for dataset backups
  if ($type ne "s") {
    my $cl = config_get($host, 'compressionlevel');
    if ($cl >= 1 && $cl <= 9) {
        open(LBACKUP, "|gzip -$cl >$store/.$dumpname") ||
        die "zfs_full_backup: cannot create dump\n";
    } else {
        open(LBACKUP, ">$store/.$dumpname") ||
        die "zfs_full_backup: cannot create dump\n";
    }
  } else {
    # Dataset backup - pipe received filesystem to zfs recv
    eval {
      $storefs = get_fs_from_mountpoint($store);
    };
    if ($@) {
      # The zfs filesystem doesn't exist, so we have to work out what it
      # would be
      my $basestore = config_get($host, 'store');
      $basestore =~ s/\/?%h//g;
      $storefs = get_fs_from_mountpoint($basestore);
      $storefs="$storefs/$host";
    }
    $encodedname = fs_encode($dumpname);
    print STDERR "Receiving to zfs filesystem $storefs/$encodedname\n"
      if($DEBUG);
    zfs_create_intermediate_filesystems("$storefs/$encodedname");
    open(LBACKUP, "|__ZFS__ recv $storefs/$encodedname");
  }
  # Do it. yeah.
  eval {
    if(my $pid = fork()) {
      # Parent: wait for the ssh/agent pipeline feeding LBACKUP to finish.
      close(LBACKUP);
      waitpid($pid, 0);
      die "error: $?" if($?);
    }
    else {
      # Child: exec the remote agent with stdout wired into LBACKUP.
      my @cmd = ('ssh', split(/ /, $ssh_config), $host, $agent, '-z', $fs);
      if ($type eq "i" || ($type eq "s" && $base)) {
        push @cmd, ("-i", $base);
      }
      if ($type eq "f" || $type eq "s") {
        push @cmd, ("-$type", $point);
      }
      # FIX: these used '||', which bound to the filename string and made
      # the failure checks unreachable; 'or' applies to open() itself.
      open(STDIN, '<', '/dev/null') or exit(-1);
      open(STDOUT, ">&LBACKUP") or exit(-1);
      print STDERR "   => @cmd\n" if($DEBUG);
      exec { $cmd[0] } @cmd;
      print STDERR "$cmd[0] failed: $?\n";
      exit($?);
    }
    if ($type ne "s") {
      die "dump failed (zero bytes)\n" if(-z "$store/.$dumpname");
      rename("$store/.$dumpname", "$store/$dumpname") || die "cannot rename dump\n";
    } else {
      # Check everything is ok
      `__ZFS__ list $storefs/$encodedname`;
      die "dump failed (received snapshot $storefs/$encodedname does not exist)\n"
        if $?;
    }
  };
  if($@) {
    if ($type ne "s") {
        unlink("$store/.$dumpname");   # drop the partial dump file
    }
    chomp(my $error = $@);
    $error =~ s/[\r\n]+/ /gsm;
    zetaback_log($host, "FAILED[$error] $host:$fs $type\n");
    die "zfs_full_backup: failed $@";
  }
  my $size;
  if ($type ne "s") {
    my @st = stat("$store/$dumpname");
    $size = pretty_size($st[7]);
  } else {
    $size = `__ZFS__ get -Ho value used $storefs/$encodedname`;
    chomp $size;
  }
  zetaback_log($host, "SUCCESS[$size] $host:$fs $type\n");
}
712
sub zfs_create_intermediate_filesystems($) {
  # Walk each '/'-separated ancestor of $full_fs and create any that do not
  # yet exist, so a subsequent "zfs recv" has a complete hierarchy.
  my ($full_fs) = @_;
  my $pos = 0;
  while (($pos = index($full_fs, '/', $pos + 1)) != -1) {
    my $ancestor = substr($full_fs, 0, $pos);
    `__ZFS__ list $ancestor 2>&1`;
    next unless $?;                 # ancestor already exists
    print STDERR "Creating intermediate zfs filesystem: $ancestor\n"
      if $DEBUG;
    `__ZFS__ create $ancestor`;
  }
}
726
sub zfs_full_backup($$$) {
  # Take a full file-based backup of $fs on $host into $store, naming the
  # dump "<timestamp>.<encoded-fs>.full".
  my ($host, $fs, $store) = @_;
  my $when = time();
  my $dumpfile = $when . '.' . dir_encode($fs) . '.full';
  zfs_do_backup($host, $fs, 'f', $when, $store, $dumpfile);
}
737
sub zfs_incremental_backup($$$$) {
  # Take an incremental file-based backup of $fs on $host against the full
  # backup taken at timestamp $base, naming the dump
  # "<timestamp>.<encoded-fs>.incremental.<base>".
  # (Removed an unused 'agent' config lookup.)
  my ($host, $fs, $base, $store) = @_;
  my $point = time();
  my $dumpname = $point . '.' . dir_encode($fs) . ".incremental.$base";
  zfs_do_backup($host, $fs, 'i', $point, $store, $dumpname, $base);
}
749
sub zfs_dataset_backup($$$$) {
  # Take a dataset backup of $fs on $host (streamed into a local zfs recv),
  # optionally incremental against snapshot timestamp $base.
  # (Removed an unused 'agent' config lookup.)
  my ($host, $fs, $base, $store) = @_;
  my $point = time();
  my $dumpname = "$fs\@$point";
  zfs_do_backup($host, $fs, 's', $point, $store, $dumpname, $base);
}
759
sub perform_retention($$) {
  # Apply the configured retention policy to every filesystem backed up
  # under $store for $host, expunging any backup that no rule protects.
  # The most recent backup, the most recent full, and any full an
  # incremental depends on are always kept.  Honors $NEUTERED (dry run).
  my ($host, $store) = @_;
  my $backup_info = scan_for_backups($store);
  my $retention = config_get($host, 'retention');
  my $now = time();

  if ($DEBUG) {
    print "Performing retention for $host\n";
  }

  foreach my $disk (sort keys %{$backup_info}) {
    my $info = $backup_info->{$disk};
    next unless(ref($info) eq 'HASH');
    # Maps backup timestamp -> list of reasons it must be kept.
    my %must_save;

    if ($DEBUG) {
      print "   $disk\n";
    }

    # Get a list of all the full and incrementals, sorted newest to oldest
    my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
    @backup_points = sort { $b <=> $a } @backup_points;

    # We _cannot_ throw away _all_ our backups,
    # so save the most recent incremental and full no matter what
    push(@{$must_save{$backup_points[0]}}, "most recent backup");
    my @fulls = grep { exists($info->{full}->{$_}) } @backup_points;
    push(@{$must_save{$fulls[0]}}, "most recent full");

    # Process retention policy: "period,count;period,count;..." where a
    # missing count (-1 sentinel) means keep everything in that period.
    my %retention_map;
    foreach my $pair (split(/;/, $retention)) {
      my ($period, $amount) = split(/,/, $pair);
      $amount = -1 unless defined($amount);
      $retention_map{$period} = $amount;
    }
    my @periods = sort { $a <=> $b } keys(%retention_map);
    my %backup_bins;
    foreach (@periods) {
      $backup_bins{$_} = ();
    }

    # Sort backups into time period sections.  @backup_points descends
    # (newest first); each point lands in the smallest period that still
    # contains it, and points older than the largest period stay unbinned.
    my $cutoff = $now - $periods[0];
    foreach my $point (@backup_points) {
      while ($point <= $cutoff) {
        # Move to the next largest bin if the current backup is not in the
        # current bin. However, if there is no larger bin, then don't
        shift(@periods);
        if (@periods) {
          $cutoff = $now - $periods[0];
        } else {
          last;
        }
      }
      # Throw away all backups older than the largest time period specified
      last if (!@periods);
      push(@{$backup_bins{$periods[0]}}, $point);
    }

    # Within each bin, keep an evenly spaced sample of the requested size.
    foreach my $period (keys(%backup_bins)) {
      my $keep = $retention_map{$period}; # How many backups to keep
      next unless ($backup_bins{$period});
      my @backups = @{$backup_bins{$period}};
      my $total = @backups;               # How many backups we have
      # If we didn't specify how many to keep, keep them all
      $keep = $total if ($keep == -1);
      # If we have less backups than we should keep, keep them all
      $keep = $total if ($total < $keep);
      for (my $i = 1; $i <= $keep; $i++) {
        my $idx = int(($i * $total) / $keep) - 1;
        push(@{$must_save{$backups[$idx]}}, "retention policy - $period");
      }
    }
    if ($DEBUG) {
      print "    => Backup bins:\n";
      foreach my $a (keys(%backup_bins)) {
        print "      => $a\n";
        foreach my $i (@{$backup_bins{$a}}) {
          my $trans = $now - $i;
          print "         => $i ($trans seconds old)";
          if (exists($must_save{$i})) { print " => keep" };
          print "\n";
        }
      }
    }

    # Look for dependencies: a kept incremental protects its base full.
    foreach my $point (@backup_points) {
      if(exists($info->{incremental}->{$point})) {
        print "   => $point depends on $info->{incremental}->{$point}->{depends}\n" if($DEBUG);
        if (exists($must_save{$point})) {
          push(@{$must_save{$info->{incremental}->{$point}->{depends}}},
            "dependency");
        }
      }
    }

    my @removals = grep { !exists($must_save{$_}) } @backup_points;
    if($DEBUG) {
      my $tf = config_get($host, 'time_format');
      print "    => Candidates for removal:\n";
      foreach (@backup_points) {
        print "      => ". strftime($tf, localtime($_));
        print " ($_)";
        print " [". (exists($info->{full}->{$_}) ? "full":"incremental") ."]";
        if (exists($must_save{$_})) {
          my $reason = join(", ", @{$must_save{$_}});
          print " => keep ($reason)";
        } else {
          print " => remove";
        }
        print "\n";
      }
    }
    foreach my $point (@removals) {
      my $filename;
      my $dataset;
      if(exists($info->{full}->{$point}->{file})) {
        $filename = $info->{full}->{$point}->{file};
      } elsif(exists($info->{incremental}->{$point}->{file})) {
        $filename = $info->{incremental}->{$point}->{file};
      } elsif(exists($info->{full}->{$point}->{dataset})) {
        $dataset = $info->{full}->{$point}->{dataset};
      } elsif(exists($info->{incremental}->{$point}->{dataset})) {
        $dataset = $info->{incremental}->{$point}->{dataset};
      } else {
        print "ERROR: We tried to expunge $host $disk [$point], but couldn't find it.\n";
      }
      # FIX: this debug line was a garbled interpolation; name whichever
      # removal target we found.
      print "    => expunging " . ($filename || $dataset || '(unknown)') . "\n" if($DEBUG);
      unless($NEUTERED) {
        if ($filename) {
          # FIX: unlink reports its failure in $!, not $? (child status).
          unlink($filename) || print "ERROR: unlink $filename: $!\n";
        } elsif ($dataset) {
          `__ZFS__ destroy $dataset`;
          if ($?) {
            print "ERROR: zfs destroy $dataset: $?\n";
          }
        }
      }
    }
  }
}
909
# Fallback comparator for choose(): plain string (cmp) ordering.
sub __default_sort($$) {
  my ($left, $right) = @_;
  return $left cmp $right;
}
911    
sub choose($$;$) {
  # Present a numbered menu built from an array ref (items are their own
  # labels) or a hash ref (keys are values, hash values are labels), sorted
  # by $sort (default: string comparison).  A single candidate is returned
  # immediately without prompting; otherwise read selections from stdin
  # until a valid index is entered.
  my ($name, $obj, $sort) = @_;
  $sort ||= \&__default_sort;
  my @list;
  my $hash;
  if (ref $obj eq 'ARRAY') {
    @list = sort { $sort->($a, $b); } @$obj;
    $hash->{$_} = $_ for @list;
  }
  elsif (ref $obj eq 'HASH') {
    @list = sort { $sort->($a, $b); } keys %$obj;
    $hash = $obj;
  }
  else {
    die "choose passed bad object: " . ref($obj) . "\n";
  }
  return $list[0] if(scalar(@list) == 1);
  print "\n";
  my $index = 1;
  foreach my $item (@list) {
    printf " %3d) $hash->{$item}\n", $index++;
  }
  my $selection = 0;
  until ($selection =~ /^\d+$/ and
         $selection >= 1 and
         $selection < $index) {
    print "$name: ";
    chomp($selection = <>);
  }
  return $list[$selection - 1];
}
943
# backup_chain($info, $ts)
#
# Walk the dependency chain for the backup at timestamp $ts, newest
# first: the incremental at $ts (if any), then each backup it depends
# on, ending with the full backup that anchors the chain.  Returns the
# list of backup records.
sub backup_chain($$) {
  my ($info, $ts) = @_;
  my @chain;
  my $cursor = $ts;
  while(defined $cursor) {
    push @chain, $info->{full}->{$cursor} if(exists($info->{full}->{$cursor}));
    if(exists($info->{incremental}->{$cursor})) {
      push @chain, $info->{incremental}->{$cursor};
      # Follow the incremental back to the point it was taken against.
      $cursor = $info->{incremental}->{$cursor}->{depends};
    }
    else {
      $cursor = undef;
    }
  }
  return @chain;
}
954
# get_fs_from_mountpoint($mountpoint)
#
# Map a mountpoint back to the zfs filesystem mounted there by scanning
# 'zfs list' output (tab separated, no header; the mountpoint is the last
# column, the filesystem name the first).  Dies if the listing cannot be
# started or no filesystem matches.
sub get_fs_from_mountpoint($) {
    my ($mountpoint) = @_;
    open(my $zfs_list, '-|', "__ZFS__ list -t filesystem -H")
        or die "Unable to determine zfs filesystem for $mountpoint";
    my $fs;
    while (my $line = <$zfs_list>) {
        my @fields = split(' ', $line);
        next unless $fields[-1] eq $mountpoint;
        $fs = $fields[0];
        last;
    }
    close($zfs_list);
    die "Unable to determine zfs filesystem for $mountpoint" unless $fs;
    return $fs;
}
971
# perform_restore()
#
# Interactive restore driver (invoked via -r).  Builds the set of
# candidate backups from every configured host, narrowed by the -h, -z
# and -t command line options, then asks the user (via choose()) which
# host/filesystem/timestamp to restore, and finally replays the backup
# chain -- full first, then each incremental -- with zfs_restore_part().
sub perform_restore() {
  # %source: host => { disk => [ matching backup timestamps ] }
  my %source;

  # Walk every host stanza; 'default' and class stanzas are not hosts.
  foreach my $host (grep { $_ ne "default" && $conf{$_}->{"type"} ne "class"}
      keys %conf) {
    # If -h was specific, we will skip this host if the arg isn't
    # an exact match or a pattern match
    if($HOST &&
       !(($HOST eq $host) ||
         ($HOST =~ /^\/(.*)\/$/ && $host =~ /$1/))) {
      next;
    }

    my $store = get_store($host);

    my $backup_info = scan_for_backups($store);
    foreach my $disk (sort keys %{$backup_info}) {
      my $info = $backup_info->{$disk};
      next unless(ref($info) eq 'HASH');
      next
        if($ZFS &&      # if the pattern was specified it could
           !($disk eq $ZFS ||        # be a specific match or a
             ($ZFS =~ /^\/(.+)\/$/ && $disk =~ /$1/))); # regex
      # We want to see this one
      my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
      my @source_points;
      # -t restricts the restore to one exact timestamp.
      foreach (@backup_points) {
        push @source_points, $_ if(!$TIMESTAMP || $TIMESTAMP == $_)
      }
      if(@source_points) {
        $source{$host}->{$disk} = \@source_points;
      }
    }
  }

  if(! keys %source) {
    print "No matching backups found\n";
    return;
  }

  # Here goes the possibly interactive dialog
  my $host = choose("Restore from host",  [keys %source]);
  my $disk = choose("Restore from ZFS", [keys %{$source{$host}}]);
 
  # Times are special.  We build a human readable form and use a numerical
  # sort function instead of the default lexical one.
  my %times;
  my $tf = config_get($host, 'time_format');
  map { $times{$_} = strftime($tf, localtime($_)); } @{$source{$host}->{$disk}};
  my $timestamp = choose("Restore as of timestamp", \%times,
                         sub { $_[0] <=> $_[1]; });

  # backup_chain() returns newest-first; reverse so the full backup is
  # replayed before the incrementals that depend on it.
  my $store = get_store($host);
  my $backup_info = scan_for_backups($store);
  my @backup_list = reverse backup_chain($backup_info->{$disk}, $timestamp);

  # Destination defaults to the original host/filesystem unless overridden
  # on the command line (-rhost/-rzfs) or at these prompts.
  if(!$RESTORE_HOST) {
    print "Restore to host [$host]:";
    chomp(my $input = <>);
    $RESTORE_HOST = length($input) ? $input : $host;
  }
  if(!$RESTORE_ZFS) {
    print "Restore to zfs [$disk]:";
    chomp(my $input = <>);
    $RESTORE_ZFS = length($input) ? $input : $disk;
  }

  # show intentions
  print "Going to restore:\n";
  print "\tfrom: $host\n";
  print "\tfrom: $disk\n";
  print "\t  at: $timestamp [" . strftime($tf, localtime($timestamp)) . "]\n";
  print "\t  to: $RESTORE_HOST\n";
  print "\t  to: $RESTORE_ZFS\n";
  print "\n";

  # Replay each link of the chain; zfs_restore_part() returns the child
  # exit status, which is recorded per-link.
  foreach(@backup_list) {
    $_->{success} = zfs_restore_part($RESTORE_HOST, $RESTORE_ZFS, $_->{file}, $_->{dataset}, $_->{depends});
  }
}
1052
# zfs_restore_part($host, $fs, $file, $dataset [, $dep])
#
# Replay one link of a backup chain onto $host:$fs.  The source is either
# a gzipped dump $file or a locally held zfs $dataset (exactly one of the
# two is expected).  $dep, when given, is the timestamp of the full backup
# an incremental depends on and is forwarded to the remote agent via -b.
# Returns the exit status of the pipe ($? -- 0 on success), or 1 when
# neither a file nor a dataset was supplied.  Honors $NEUTERED (dry run)
# and $DEBUG.
sub zfs_restore_part($$$$;$) {
  my ($host, $fs, $file, $dataset, $dep) = @_;
  unless ($file || $dataset) {
    print STDERR "=> No dataset or filename given to restore. Bailing out.";
    return 1;
  }
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  print "Using custom ssh config file: $ssh_config\n" if($DEBUG);
  # If the target host has a config stanza, restore through its zetaback
  # agent; otherwise fall back to a raw 'zfs recv' over ssh.
  my $command;
  if(exists($conf{$host})) {
    my $agent = config_get($host, 'agent');
    $command = "$agent -r -z $fs";
    $command .= " -b $dep" if($dep);
  }
  else {
    $command = "__ZFS__ recv $fs";
  }
  if ($file) {
    print " => piping $file to $command\n" if($DEBUG);
    print "gzip -dfc $file | ssh $ssh_config $host $command\n" if ($DEBUG && $NEUTERED);
  } elsif ($dataset) {
    print " => piping $dataset to $command using zfs send\n" if ($DEBUG);
    print "zfs send $dataset | ssh $ssh_config $host $command\n" if ($DEBUG && $NEUTERED);
  }
  unless($NEUTERED) {
    # Pump the dump stream to the remote receiver in $BLOCKSIZE chunks.
    # A short write dies out of the eval so both handles still get closed.
    if ($file) {
      open(DUMP, "gzip -dfc $file |");
    } elsif ($dataset) {
      open(DUMP, "__ZFS__ send $dataset |");
    }
    eval {
      open(RECEIVER, "| ssh $ssh_config $host $command");
      my $buffer;
      while(my $len = sysread(DUMP, $buffer, $BLOCKSIZE)) {
        if(syswrite(RECEIVER, $buffer, $len) != $len) {
          die "$!";
        }
      }
    };
    close(DUMP);
    close(RECEIVER);
  }
  # $? reflects the most recently closed pipe (the receiver); it is 0 in
  # a $NEUTERED run since nothing was executed.
  return $?;
}
1098
# pretty_print_backup($info, $host, $point)
#
# Print a one-line (two with $SHOW_FILENAMES) description of the backup
# taken at timestamp $point: its local time, whether it is FULL or INCR,
# its size, and optionally the backing file or dataset.
sub pretty_print_backup($$$) {
  my ($info, $host, $point) = @_;
  my $tf = config_get($host, 'time_format');
  print "\t" . strftime($tf, localtime($point)) . " [$point] ";
  if(exists($info->{full}->{$point})) {
    my $full = $info->{full}->{$point};
    if ($full->{file}) {
      # File-backed full: size comes from stat (element 7 is byte size).
      my $bytes = (stat($full->{file}))[7];
      print "FULL " . pretty_size($bytes);
      print "\n\tfile: $full->{file}" if($SHOW_FILENAMES);
    } elsif ($full->{dataset}) {
      # Dataset-backed full: size was pre-formatted at scan time.
      print "FULL $full->{pretty_size}";
      print "\n\tdataset: $full->{dataset}"
        if($SHOW_FILENAMES);
    }
  } else {
    my $incr = $info->{incremental}->{$point};
    my $bytes = (stat($incr->{file}))[7];
    print "INCR from [$incr->{depends}] " . pretty_size($bytes);
    print "\n\tfile: $incr->{file}" if($SHOW_FILENAMES);
  }
  print "\n";
}
1120
# show_backups($host, $store, $diskpat)
#
# Listing driver for -l/-s/-sx/-sv/-a.  Scans $store for backups of
# $host, filters by $diskpat (exact filesystem name or /regex/), and
# reports in a mode chosen by the global flags:
#   $SUMMARY_VIOLATORS - only filesystems whose last full or last backup
#                        is older than the configured intervals
#   $SUMMARY/$SUMMARY_EXT - per-filesystem detail (extended adds last
#                        full/incremental times)
#   $ARCHIVE           - full listing, then an interactive offer to move
#                        the backing files/datasets into the archive area
sub show_backups($$$) {
  my ($host, $store, $diskpat) = @_;
  my $backup_info = scan_for_backups($store);
  my $tf = config_get($host, 'time_format');
  # Collect what backs each printed point in case we archive below.
  my (@files, @datasets);
  foreach my $disk (sort keys %{$backup_info}) {
    my $info = $backup_info->{$disk};
    next unless(ref($info) eq 'HASH');
    next
      if($diskpat &&      # if the pattern was specified it could
         !($disk eq $diskpat ||        # be a specific match or a
           ($diskpat =~ /^\/(.+)\/$/ && $disk =~ /$1/))); # regex

    my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
    @backup_points = sort { $a <=> $b } @backup_points;
    # Unless a mode needs full history, only the newest point matters.
    @backup_points = (pop @backup_points) unless ($ARCHIVE || $SUMMARY_EXT || $SUMMARY_VIOLATORS);

    # Quick short-circuit in the case of retention violation checks
    if($SUMMARY_VIOLATORS) {
      if(time() > $info->{last_full} + config_get($host, 'full_interval') ||
         time() > $info->{last_backup} + config_get($host, 'backup_interval')) {
        print "$host:$disk\n";
        pretty_print_backup($info, $host, $info->{last_full});
        # Only print the last backup if it isn't the same as the last full
        if ($info->{last_full} != $info->{last_backup}) {
            pretty_print_backup($info, $host, $info->{last_backup});
        }
      }
      next;
    }

    # We want to see this one
    print "$host:$disk\n";
    next unless($SUMMARY || $SUMMARY_EXT || $ARCHIVE);
    if($SUMMARY_EXT) {
      print "\tLast Full: ". ($info->{last_full} ? strftime($tf, localtime($info->{last_full})) : "Never") . "\n";
      if($info->{last_full} < $info->{last_incremental}) {
        print "\tLast Incr: ". strftime($tf, localtime($info->{last_incremental})). "\n";
      }
    }
    foreach (@backup_points) {
      pretty_print_backup($info, $host, $_);
      if(exists($info->{full}->{$_}->{file})) {
        push @files, $info->{full}->{$_}->{file};
      } elsif(exists($info->{incremental}->{$_}->{file})) {
        push @files, $info->{incremental}->{$_}->{file};
      } elsif(exists($info->{full}->{$_}->{dataset})) {
        push @datasets, $info->{full}->{$_}->{dataset}
      }
    }
    print "\n";
  }
  # Interactive archive step: confirm, then move files into the archive
  # directory and 'zfs rename' datasets into the archive filesystem.
  if($ARCHIVE && (scalar(@files) || scalar(@datasets))) {
    print "\nAre you sure you would like to archive ".scalar(@files).
      " file(s) and ".scalar(@datasets)." dataset(s)? ";
    # Re-prompt until the answer ends in y/n/yes/no (case-insensitive).
    while(($_ = <>) !~ /(?:y|n|yes|no)$/i) {
      print "\nAre you sure you would like to archive ".scalar(@files).
        " file(s) and ".scalar(@datasets)." dataset(s)? ";
    }
    if(/^y/i) {
      if (@files) {
        # %h in the archive path expands to the host name.
        my $archive = config_get($host, 'archive');
        $archive =~ s/%h/$host/g;
        if(! -d $archive) {
          mkdir $archive || die "Cannot mkdir($archive)\n";
        }
        foreach my $file (@files) {
          # Re-root the file path from the store into the archive.
          (my $afile = $file) =~ s/^$store/$archive/;
          move($file, $afile) || print "Error archiving $file: $!\n";
        }
      }
      if (@datasets) {
        my $archive = config_get($host, 'archive');
        my $storefs = get_fs_from_mountpoint($store);
        # Strip the per-host component to find the archive root, which
        # must itself be the root of a zfs filesystem.
        (my $basearchive = $archive) =~ s/\/?%h//g;
        my $basearchivefs;
        eval {
          $basearchivefs = get_fs_from_mountpoint($basearchive);
        };
        die "Unable to find archive filesystem. The archive directory must be the root of a zfs filesystem to archive datasets." if $@;
        my $archivefs = "$basearchivefs/$host";
        `__ZFS__ create $archivefs`; # We don't care if this fails
        my %seen = ();
        foreach my $dataset (@datasets) {
          $dataset =~ s/@.*$//; # Only rename filesystems, not snapshots
          next if $seen{$dataset}++; # Only rename a filesystem once
          (my $adataset = $dataset) =~ s/^$storefs/$archivefs/;
          `__ZFS__ rename $dataset $adataset`;
          if ($?) {
            print "Error archiving $dataset\n";
          }
        }
      }
    }
  }
}
1217
# plan_and_run($host, $diskpat)
#
# Backup driver for one host (invoked via -b).  Asks the remote zetaback
# agent for its filesystem list, decides per filesystem whether a full,
# incremental, or dataset backup is due (based on backup_interval /
# full_interval, the -ff/-fi overrides, and the dataset_backup setting),
# and performs it.  Each successful backup sets $took_action, which
# breaks out of the disk loop and re-lists from the top, since intervals
# and snapshots have changed.  Honors $NEUTERED (plan only) and $DEBUG.
sub plan_and_run($$) {
  my ($host, $diskpat) = @_;
  my $store;
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  # Filesystems already handled this invocation; never backed up twice.
  my %suppress;
  print "Planning '$host'\n" if($DEBUG);
  my $agent = config_get($host, 'agent');
  my $took_action = 1;
  while($took_action) {
    $took_action = 0;
    my @disklist;

    # We need a lock for the listing.
    return unless(lock($host, ".list"));

    # Get list of zfs filesystems from the agent.  STDERR is parked on
    # SILENT for the duration so ssh noise doesn't pollute our output.
    open(SILENT, ">&", \*STDERR);
    close(STDERR);
    my $rv = open(ZFSLIST, "ssh $ssh_config $host $agent -l |");
    open(STDERR, ">&", \*SILENT);
    close(SILENT);
    next unless $rv;
    @disklist = grep { chomp } (<ZFSLIST>);
    close(ZFSLIST);

    if ($DEBUG) {
      print " => Filesystems for $host (zetaback_agent -l output)\n";
      foreach my $diskline (@disklist) {
        print "    $diskline\n";
      }
    }

    foreach my $diskline (@disklist) {
      chomp($diskline);
      # Agent lines look like:  name [snap1,snap2,...] {class}
      # with the {class} suffix optional.  The closing brace must sit
      # outside the capture, otherwise $class would come back as "foo}"
      # and never match a configured class stanza.
      next unless($diskline =~ /^(\S+) \[([^\]]*)\](?: {([^}]*)})?/);
      my $diskname = $1;
      my %snaps;
      map { $snaps{$_} = 1 } (split(/,/, $2));
      my $class = $3;
      $store = get_store($host, $class);

      # We've just done this.
      next if($suppress{"$host:$diskname"});
      # If we are being selective (via -z) now is the time.
      next
        if($diskpat &&          # if the pattern was specified it could
           !($diskname eq $diskpat ||        # be a specific match or a
             ($diskpat =~ /^\/(.+)\/$/ && $diskname =~ /$1/))); # regex

      print " => Scanning '$store' for old backups of '$diskname'.\n" if($DEBUG);

      # Make directory on demand
      my $backup_info = scan_for_backups($store);
      # That gave us info on all backups, we just want this disk
      $backup_info = $backup_info->{$diskname} || {};

      # Should we do a backup?
      my $backup_type = 'no';
      if(time() > $backup_info->{last_backup} + config_get($host,
          'backup_interval', $class)) {
        $backup_type = 'incremental';
      }
      if(time() > $backup_info->{last_full} + config_get($host,
          'full_interval', $class)) {
        $backup_type = 'full';
      }
      # If we want an incremental, but have no full, then we need to upgrade to full
      if($backup_type eq 'incremental') {
        my $have_full_locally = 0;
        # For each local full backup, see if the full backup still exists on the other end.
        foreach (keys %{$backup_info->{'full'}}) {
          $have_full_locally = 1 if(exists($snaps{'__zb_full_' . $_}));
        }
        $backup_type = 'full' unless($have_full_locally);
      }
      $backup_type = 'full' if($FORCE_FULL);
      $backup_type = 'incremental' if($FORCE_INC);
      $backup_type = 'dataset' if(config_get($host, 'dataset_backup', $class)
        eq 1 && $backup_type ne 'no');

      print " => doing $backup_type backup\n" if($DEBUG);
      # We need to drop a __zb_base snap or a __zb_incr snap before we proceed
      unless($NEUTERED || $backup_type eq 'no') {
        # attempt to lock this action, if it fails, skip -- someone else is working it.
        next unless(lock($host, dir_encode($diskname), 1));
        unlock($host, '.list');

        if($backup_type eq 'full') {
          eval { zfs_full_backup($host, $diskname, $store); };
          if ($@) {
            chomp(my $err = $@);
            print " => failure $err\n";
          }
          else {
            # Unless there was an error backing up, remove all the other full snaps
            foreach (keys %snaps) {
              zfs_remove_snap($host, $diskname, $_) if(/^__zb_full_(\d+)/)
            }
          }
          $took_action = 1;
        }
        if($backup_type eq 'incremental') {
          eval {
            zfs_remove_snap($host, $diskname, '__zb_incr') if($snaps{'__zb_incr'});
            # Find the newest full from which to do an incremental (NOTE: reverse numeric sort)
            my @fulls = sort { $b <=> $a } (keys %{$backup_info->{'full'}});
            zfs_incremental_backup($host, $diskname, $fulls[0], $store);
          };
          if ($@) {
            chomp(my $err = $@);
            print " => failure $err\n";
          }
          else {
            $took_action = 1;
          }
        }
        if($backup_type eq 'dataset') {
          my @backups = sort { $b <=> $a } (keys %{$backup_info->{'full'}});
          eval { zfs_dataset_backup($host, $diskname, $backups[0], $store); };
          if ($@) {
            chomp(my $err = $@);
            print " => failure $err\n";
          }
          else {
            # Unless there was an error backing up, remove all the other dset snaps
            foreach (keys %snaps) {
              zfs_remove_snap($host, $diskname, $_) if(/^__zb_dset_(\d+)/)
            }
          }
          $took_action = 1;
        }
        unlock($host, dir_encode($diskname), 1);
      }
      $suppress{"$host:$diskname"} = 1;
      # A backup invalidated our listing; break out and re-list.
      last if($took_action);
    }
    unlock($host, '.list');
  }
}
1358
# Main dispatch.  -r (restore) runs its own interactive flow; every other
# mode iterates over the configured hosts and applies the requested
# action(s) -- listing/summary/archive, backup, and/or expunge.
if($RESTORE) {
  perform_restore();
}
else {
  # 'default' and class stanzas are configuration, not hosts.
  foreach my $host (grep { $_ ne "default" && $conf{$_}->{"type"} ne "class"}
      keys %conf) {
    # If -h was specific, we will skip this host if the arg isn't
    # an exact match or a pattern match
    if($HOST &&
       !(($HOST eq $host) ||
         ($HOST =~ /^\/(.*)\/$/ && $host =~ /$1/))) {
      next;
    }

    # Skip if the host is marked as 'offline' and we are not listing backups
    if (config_get($host, 'offline') == 1 &&
        !$LIST && !$SUMMARY && !$SUMMARY_EXT && !$ARCHIVE) {
      next;
    }

    # TODO - remove once class functionality is completed
    my $store = get_store($host, '');
 
    # Multiple action flags may be combined; they run in this order.
    if($LIST || $SUMMARY || $SUMMARY_EXT || $SUMMARY_VIOLATORS || $ARCHIVE) {
      show_backups($host, $store, $ZFS);
    }
    if($BACKUP) {
      plan_and_run($host, $ZFS);
    }
    if($EXPUNGE) {
      perform_retention($host, $store);
    }
  }
}

exit 0;
1395
1396 =pod
1397
1398 =head1 FILES
1399
1400 =over
1401
1402 =item zetaback.conf
1403
1404 The main zetaback configuration file.  The location of the file can be
1405 specified on the command line with the -c flag.  The prefix of this
1406 file may also be specified as an argument to the configure script.
1407
1408 =back
1409
1410 =head1 SEE ALSO
1411
1412 zetaback_agent(1)
1413
1414 =cut
Note: See TracBrowser for help on using the browser.