root/zetaback.in

Revision dc11efaa881b0eadfba92e9a6a768e812a252784, 43.9 kB (checked in by Mark Harrison <mark@omniti.com>, 4 years ago)

Merge changeset 141 in classes branch

  • Property mode set to 100755
Line 
1 #!/usr/bin/perl
2 # vim: sts=2 sw=2 ts=8 et
3
4 # Copyright (c) 2007 OmniTI Computer Consulting, Inc. All rights reserved.
5 # For information on licensing see:
6 #   https://labs.omniti.com/zetaback/trunk/LICENSE
7
8 use strict;
9 use Getopt::Long;
10 use MIME::Base64;
11 use POSIX qw/strftime/;
12 use Fcntl qw/:flock/;
13 use File::Copy;
14 use IO::File;
15 use Pod::Usage;
16
# Globals shared across the whole script.  The command-line flag variables
# below are populated by GetOptions; the rest are configuration/derived state.
use vars qw/%conf %locks $version_string
            $PREFIX $CONF $BLOCKSIZE $DEBUG $HOST $BACKUP
            $RESTORE $RESTORE_HOST $RESTORE_ZFS $TIMESTAMP
            $LIST $SUMMARY $SUMMARY_EXT $SUMMARY_VIOLATORS
            $FORCE_FULL $FORCE_INC $EXPUNGE $NEUTERED $ZFS
            $SHOW_FILENAMES $ARCHIVE $VERSION $HELP/;
# Derive a short human-readable version from the q$URL$ keyword
# (presumably expanded by SVN keyword substitution at checkout -- confirm).
$version_string = q$URL$;
$version_string =~ s#/branches/#/b#;
$version_string =~ s#^.*/([^/]+)/[^/]+$#$1#;
# __PREFIX__ is substituted by the install/configure process.
$PREFIX = q^__PREFIX__^;
$CONF = qq^$PREFIX/etc/zetaback.conf^;
$BLOCKSIZE = 1024*64;

# Built-in defaults; any of these may be overridden per host in the config.
$conf{'default'}->{'time_format'} = "%Y-%m-%d %H:%M:%S";
$conf{'default'}->{'retention'} = 14 * 86400;    # two weeks
$conf{'default'}->{'compressionlevel'} = 1;      # gzip -1 (fastest)
$conf{'default'}->{'dataset_backup'} = 0;        # default: file-based backups
34
35 =pod
36
37 =head1 NAME
38
39 zetaback - perform backup, restore and retention policies for ZFS backups.
40
41 =head1 SYNOPSIS
42
43   zetaback -v
44
45   zetaback [-l | -s | -sx | -sv] [--files] [-c conf] [-d] [-h host] [-z zfs]
46
47   zetaback -a [-c conf] [-d] [-h host] [-z zfs]
48
49   zetaback -b [-ff] [-fi] [-x] [-c conf] [-d] [-n] [-h host] [-z zfs]
50
51   zetaback -x [-b] [-c conf] [-d] [-n] [-h host] [-z zfs]
52
53   zetaback -r [-c conf] [-d] [-n] [-h host] [-z zfs] [-t timestamp]
54               [-rhost host] [-rzfs fs]
55
56 =cut
57
# Parse command-line flags into the corresponding globals.
# BUG FIX: the return value of GetOptions was previously ignored, so an
# unrecognized option only produced a warning and execution continued;
# it now triggers the usage message, matching the action-validation below.
unless(GetOptions(
  "h=s"     => \$HOST,
  "z=s"     => \$ZFS,
  "c=s"     => \$CONF,
  "a"       => \$ARCHIVE,
  "b"       => \$BACKUP,
  "l"       => \$LIST,
  "s"       => \$SUMMARY,
  "sx"      => \$SUMMARY_EXT,
  "sv"      => \$SUMMARY_VIOLATORS,
  "r"       => \$RESTORE,
  "t=i"     => \$TIMESTAMP,
  "rhost=s" => \$RESTORE_HOST,
  "rzfs=s"  => \$RESTORE_ZFS,
  "d"       => \$DEBUG,
  "n"       => \$NEUTERED,
  "x"       => \$EXPUNGE,
  "v"       => \$VERSION,
  "ff"      => \$FORCE_FULL,
  "fi"      => \$FORCE_INC,
  "files"   => \$SHOW_FILENAMES,
)) {
  pod2usage({ -verbose => 0 });
  exit -1;
}
80
# actions allowed together 'x' and 'b' all others are exclusive:
# Exactly one "action" must be requested; -b and -x count as a single
# combined action.
my $actions = 0;
$actions++ if($ARCHIVE);
$actions++ if($BACKUP || $EXPUNGE);
$actions++ if($RESTORE);
$actions++ if($LIST);
$actions++ if($SUMMARY);
$actions++ if($SUMMARY_EXT);
$actions++ if($SUMMARY_VIOLATORS);
$actions++ if($VERSION);
# -ff and -fi are mutually exclusive: counting the combination as an extra
# action forces the usage failure below when both are given with -b.
$actions++ if($BACKUP && $FORCE_FULL && $FORCE_INC);
if($actions != 1) {
  pod2usage({ -verbose => 0 });
  # NOTE(review): pod2usage() exits on its own by default, so this exit -1
  # is likely unreachable -- confirm before relying on the -1 status.
  exit -1;
}
96
97 =pod
98
99 =head1 DESCRIPTION
100
101 The B<zetaback> program orchestrates the backup (either full or
102 incremental) of remote ZFS filesystems to a local store.  It handles
103 frequency requirements for both full and incremental backups as well
104 as retention policies.  In addition to backups, the B<zetaback> tool
105 allows for the restore of any backup to a specified host and zfs
106 filesystem.
107
108 =head1 OPTIONS
109
110 The non-optional action command line arguments define the invocation purpose
111 of B<zetaback>.  All other arguments are optional and refine the target
112 of the action specified.
113
114 =head2 Generic Options
115
116 The following arguments have the same meaning over several actions:
117
118 =over
119
120 =item -c <conf>
121
122 Use the specified file as the configuration file.  The default file, if
123 none is specified is /usr/local/etc/zetaback.conf.  The prefix of this
124 file may also be specified as an argument to the configure script.
125
126 =item -d
127
128 Enable debugging output.
129
130 =item -n
131
132 Don't actually perform any remote commands or expunging.  This is useful with
133 the -d argument to ascertain what would be done if the command was actually
134 executed.
135
136 =item -t <timestamp>
137
138 Used during the restore process to specify a backup image from the desired
139 point in time.  If omitted, the command becomes interactive.  This timestamp
140 is a UNIX timestamp and is shown in the output of the -s and -sx actions.
141
142 =item -rhost <host>
143
144 Specify the remote host that is the target for a restore operation.  If
145 omitted the command becomes interactive.
146
147 =item -rzfs <zfs>
148
149 Specify the remote ZFS filesystem that is the target for a restore
150 operation.  If omitted the command becomes interactive.
151
152 =item -h <host>
153
154 Filters the operation to the host specified.  If <host> is of the form
155 /pattern/, it matches 'pattern' as a perl regular expression against available
156 hosts.  If omitted, no limit is enforced and all hosts are used for the action.
157
158 =item -z <zfs>
159
160 Filters the operation to the zfs filesystem specified.  If <zfs> is of the
161 form /pattern/, it matches 'pattern' as a perl regular expression against
162 available zfs filesystems.  If omitted, no filter is enforced and all zfs
163 filesystems are used for the action.
164
165 =back
166
167 =head2 Actions
168
169 =over
170
171 =item -v
172
173 Show the version.
174
175 =item -l
176
177 Show a brief listing of available backups.
178
179 =item -s
180
181 Like -l, -s will show a list of backups but provides additional information
182 about the backups including timestamp, type (full or incremental) and the
183 size on disk.
184
185 =item -sx
186
187 Shows an extended summary.  In addition to the output provided by the -s
188 action, the -sx action will show detail for each available backup.  For
189 full backups, the detail will include any more recent full backups, if
190 they exist.  For incremental backups, the detail will include any
191 incremental backups that are more recent than the last full backup.
192
193 =item -sv
194
195 Display all backups in the current store that violate the configured
196 backup policy. This is where the most recent full backup is older than
197 full_interval seconds ago, or the most recent incremental backup is older
198 than backup_interval seconds ago.
199
200 =item --files
201
202 Display the on-disk file corresponding to each backup named in the output.
203 This is useful with the -sv flag to name violating files.  Often times,
204 violators are filesystems that have been removed on the host machines and
205 zetaback can no longer back them up.  Be very careful if you choose to
206 automate the removal of such backups as filesystems that would be backed up
207 by the next regular zetaback run will often show up as violators.
208
209 =item -a
210
211 Performs an archive.  This option will look at all eligible backup points
212 (as restricted by -z and -h) and move those to the configured archive
213 directory.  The recommended use is to first issue -sx --files then
214 carefully review available backup points and prune those that are
215 unneeded.  Then invoke with -a to move only the remaining "desired"
216 backup points into the archives.  Archived backups do not appear in any
217 listings or in the list of policy violators generated by the -sv option.
218 In effect, they are no longer "visible" to zetaback.
219
220 =item -b
221
222 Performs a backup.  This option will investigate all eligible hosts, query
223 the available filesystems from the remote agent and determine if any such
224 filesystems require a new full or incremental backup to be taken.  This
225 option may be combined with the -x option (to clean up afterwards.)
226
227 =item -ff
228
229 Forces a full backup to be taken on each filesystem encountered.  This is
230 used in combination with -b.  It is recommended to use this option only when
231 targeting specific filesystems (via the -h and -z options.)  Forcing a full
232 backup across all machines will cause staggered backups to coalesce and
233 could cause performance issues.
234
235 =item -fi
236
237 Forces an incremental backup to be taken on each filesystem encountered. 
238 This is used in combination with -b.  It is recommended to use this option
239 only when targeting specific filesystems (via the -h and -z options.)  Forcing
240 an incremental backup across all machines will cause staggered backups
241 to coalesce and could cause performance issues.
242
243 =item -x
244
245 Perform an expunge.  This option will determine which, if any, of the local
246 backups may be deleted given the retention policy specified in the
247 configuration.
248
249 =item -r
250
251 Perform a restore.  This option will operate on the specified backup and
252 restore it to the ZFS filesystem specified with -rzfs on the host specified
253 with the -rhost option.  The -h, -z and -t options may be used to filter
254 the source backup list.  If the filtered list contains more than one
255 source backup image, the command will act interactively.  If the -rhost
256 and -rzfs command are not specified, the command will act interactively.
257
258 =back
259
260 =cut
261
# -v: print the derived version string and quit immediately.
if($VERSION) {
  print "zetaback: $version_string\n";
  exit 0;
}
266
267 =pod
268
269 =head1 CONFIGURATION
270
271 The zetaback configuration file consists of a default stanza, containing
272 settings that can be overridden on a per-host basis.  A stanza begins
273 either with the string 'default', or a fully-qualified hostname, with
274 settings enclosed in braces ({}).  Single-line comments begin with a hash
275 ('#'), and whitespace is ignored, so feel free to indent for better
276 readability.  Every host to be backed up must have a host stanza in the
277 configuration file.
278
279 =head2 Settings
280
281 The following settings are valid in both the default and host scopes:
282
283 =over
284
285 =item store
286
287 The base directory under which to keep backups.  An interpolated variable
288 '%h' can be used, which expands to the hostname.  There is no default for
289 this setting.
290
291 =item archive
292
293 The base directory under which archives are stored.  The format is the same
294 as the store setting.  This is the destination to which files are relocated
295 when issuing an archive action (-a).
296
297 =item agent
298
299 The location of the zetaback_agent binary on the host.  There is no default
300 for this setting.
301
302 =item time_format
303
304 All timestamps within zetaback are in UNIX timestamp format.  This setting
305 provides a string for formatting all timestamps on output.  The sequences
306 available are identical to those in strftime(3).  If not specified, the
307 default is '%Y-%m-%d %H:%M:%S'.
308
309 =item backup_interval
310
311 The frequency (in seconds) at which to perform incremental backups.  An
312 incremental backup will be performed if the current time is more than
313 backup_interval since the last incremental backup.  If there is no full backup
314 for a particular filesystem, then a full backup is performed.  There is no
315 default for this setting.
316
317 =item full_interval
318
319 The frequency (in seconds) at which to perform full backups.  A full backup will
320 be performed if the current time is more than full_interval since the last full
321 backup.
322
323 =item retention
324
325 The retention time (in seconds) for backups.  This can be a simple number, in
326 which case all backups older than this will be expunged.
327
328 The retention specification can also be more complex, and consist of pairs of
329 values separated by a comma. The first value is a time period in seconds, and
330 the second value is how many backups should be retained within that period.
331 For example:
332
333 retention = 3600,4;86400,11
334
335 This will keep up to 4 backups for the first hour, and an additional 11
336 backups over 24 hours. The times do not stack. In other words, the 11 backups
337 would be kept during the period from 1 hour old to 24 hours old, or one every
338 2 hours.
339
340 Any backups older than the largest time given are deleted. In the above
341 example, all backups older than 24 hours are deleted.
342
343 If a second number is not specified, then all backups are kept within that
344 period.
345
346 Note: Full backups are never deleted if they are depended upon by an
347 incremental. In addition, the most recent backup is never deleted, regardless
348 of how old it is.
349
350 This value defaults to (14 * 86400), or two weeks.
351
352 =item compressionlevel
353
354 Compress files using gzip at the specified compression level. 0 means no
355 compression. Accepted values are 1-9. Defaults to 1 (fastest/minimal
356 compression.)
357
358 =item ssh_config
359
360 Full path to an alternate ssh client config.  This is useful for specifying a
361 less secure but faster cipher for some hosts, or using a different private
362 key.  There is no default for this setting.
363
364 =item dataset_backup
365
366 By default zetaback backs zfs filesystems up to files. This option lets you
367 specify that the backup be stored as a zfs dataset on the backup host.
368
369 =item offline
370
371 Setting this option to 1 for a host will mark it as being 'offline'. Hosts
372 that are marked offline will not be backed up, will not have any old backups
373 expunged and will not be included in the list of policy violators. However,
374 the host will still be shown when listing backups and archiving.
375
376 =back
377
378 =head1 CONFIGURATION EXAMPLES
379
380 =head2 Uniform hosts
381
382 This config results in backups stored in /var/spool/zfs_backups, with a
383 subdirectory for each host.  Incremental backups will be performed
384 approximately once per day, assuming zetaback is run hourly.  Full backups
385 will be done once per week.  Time format and retention are default.
386
387   default {
388     store = /var/spool/zfs_backups/%h
389     agent = /usr/local/bin/zetaback_agent
390     backup_interval = 83000
391     full_interval = 604800
392   }
393
394   host1 {}
395
396   host2 {}
397
398 =head2 Non-uniform hosts
399
400 Here, host1's and host2's agents are found in different places, and host2's
401 backups should be stored in a different path.
402
403   default {
404     store = /var/spool/zfs_backups/%h
405     agent = /usr/local/bin/zetaback_agent
406     backup_interval = 83000
407     full_interval = 604800
408   }
409
410   host1 {
411     agent = /opt/local/bin/zetaback_agent
412   }
413
414   host2 {
415     store = /var/spool/alt_backups/%h
416     agent = /www/bin/zetaback_agent
417   }
418
419 =cut
420
# Make the parser more formal:
# config => stanza*
# stanza => string { kvp* }
# kvp    => string = string
# A "string" is either a double-quoted string (allowing \\ and \" escapes)
# or a bare run of non-whitespace characters.
my $str_re = qr/(?:"(?:\\\\|\\"|[^"])*"|\S+)/;
my $kvp_re = qr/($str_re)\s*=\s*($str_re)/;
my $stanza_re = qr/($str_re)\s*\{((?:\s*$kvp_re)*)\s*\}/;
428
# Parse the file named by $CONF into the global %conf hash.
# Each stanza "scope { key = value ... }" becomes $conf{scope}{lc(key)};
# surrounding double quotes are stripped from scopes, keys and values.
# Dies if the config file cannot be opened.
sub parse_config() {
  local $/;  # slurp mode: <> reads the whole file at once
  # BUG FIX: was a two-arg open onto the bareword handle CONF with an
  # interpolated filename; use the safe three-arg form with a lexical handle.
  open(my $fh, '<', $CONF) or die "Unable to open config file: $CONF ($!)";
  my $file = <$fh>;
  close($fh);
  # Rip comments
  $file =~ s/^\s*#.*$//mg;
  while($file =~ m/$stanza_re/gm) {
    my $scope = $1;
    my $filepart = $2;
    $scope =~ s/^"(.*)"$/$1/;
    $conf{$scope} ||= {};
    while($filepart =~ m/$kvp_re/gm) {
      my $key = $1;
      my $value = $2;
      $key =~ s/^"(.*)"$/$1/;
      $value =~ s/^"(.*)"$/$1/;
      $conf{$scope}->{lc($key)} = $value;
    }
  }
}
# Look up a configuration value with scope fallback:
# host scope, then the optional class scope, then 'default'.
# Returns undef when the key is present nowhere.
# BUG FIX: the previous "||" chain treated legitimate false values (0, "")
# as missing and fell through to the default scope; exists() is used now so
# an explicit "key = 0" in a host stanza takes effect.
sub config_get($$;$) {
  # Params: host, key, class (optional)
  my ($host, $key, $class) = @_;
  my @scopes = ($host);
  push @scopes, $class if($class);   # class is only consulted when truthy
  push @scopes, 'default';
  foreach my $scope (@scopes) {
    return $conf{$scope}->{$key}
      if(exists($conf{$scope}) && exists($conf{$scope}->{$key}));
  }
  return undef;
}
460
# Resolve the store directory for a host (expanding %h to the hostname)
# and create it if it does not yet exist.
sub get_store($;$) {
  my ($host, $class) = @_;
  my $store = config_get($host, 'store', $class);
  $store =~ s/%h/$host/g;
  unless(-d $store) {
    # BUG FIX: mkdir failure was silently ignored, which only surfaced
    # later as confusing open() errors inside the missing directory.
    mkdir($store) or die "Unable to create store directory $store: $!\n";
  }
  return $store;
}
468
# Encode a dataset name (optionally "name@snapshot") into a zfs-safe
# component: the name part is base64 encoded with '/' -> '_', '=' -> '-'
# and '+' -> '.', and any snapshot suffix is appended unchanged.
sub fs_encode($) {
  my ($name, $snap) = split('@', shift);
  my $encoded = encode_base64($name, '');
  $encoded =~ tr|/=+|_\-.|;
  $encoded .= "\@$snap" if(defined($snap));
  return $encoded;
}
# Reverse the character mapping applied by fs_encode ('_' -> '/',
# '-' -> '=', '.' -> '+') and base64 decode the result.
sub fs_decode($) {
  my $encoded = shift;
  $encoded =~ tr|_\-.|/=+|;
  return decode_base64($encoded);
}
# Encode an arbitrary path into a single filename-safe component.
# BUG FIX: the substitutions in dir_encode/dir_decode lacked /g, so only
# the FIRST '/' produced by base64 was replaced; an encoded name could
# still contain slashes, which are invalid inside a filename.  Both
# directions are fixed together so round-tripping stays consistent
# (base64 output never contains '_', so every '_' maps back to '/').
sub dir_encode($) {
  my $d = shift;
  my $e = encode_base64($d, '');
  $e =~ s/\//_/g;
  return $e;
}
# Reverse dir_encode(): restore all '/' characters and base64 decode.
sub dir_decode($) {
  my $e = shift;
  $e =~ s/_/\//g;
  return decode_base64($e);
}
# Render a byte count as a human-readable string using the largest unit
# (Gb/Mb/Kb) the value strictly exceeds; plain bytes otherwise.
sub pretty_size($) {
  my $bytes = shift;
  foreach my $unit ([1024**3, 'Gb'], [1024**2, 'Mb'], [1024, 'Kb']) {
    my ($scale, $suffix) = @$unit;
    return sprintf("%0.2f %s", $bytes / $scale, $suffix) if($bytes > $scale);
  }
  return "$bytes b";
}
# Acquire (or reuse) an advisory flock on $store/$file for $host.
# Returns 1 on success, 0 on failure; with $nowait the attempt is
# non-blocking.  An already-held lock is reused without re-locking.
# BUG FIX: every lock previously used the single shared bareword handle
# LOCK, so acquiring a second lock reopened the glob and thereby closed
# (and released) the first lock's descriptor.  Each lock now keeps its own
# lexical filehandle in %locks.
sub lock($;$$) {
  my ($host, $file, $nowait) = @_;
  print "Acquiring lock for $host:$file\n" if($DEBUG);
  $file ||= 'master.lock';
  my $store = get_store($host); # Don't take classes into account - not needed
  return 1 if(exists($locks{"$host:$file"}));
  open(my $lockfh, '+>>', "$store/$file") or return 0;
  unless(flock($lockfh, LOCK_EX | ($nowait ? LOCK_NB : 0))) {
    close($lockfh);
    return 0;
  }
  $locks{"$host:$file"} = $lockfh;
  return 1;
}
# Release a lock previously acquired with lock().  Returns 1 if released,
# 0 if no such lock was held; with $remove the lock file is also unlinked.
# BUG FIX: the stored handle was looked up as $locks{$file} instead of
# $locks{"$host:$file"} (always undef), and the entry was never removed,
# so a subsequent lock() claimed success without actually holding a lock.
# The entry is now deleted so it can be re-acquired.
sub unlock($;$$) {
  my ($host, $file, $remove) = @_;
  print "Releasing lock for $host:$file\n" if($DEBUG);
  $file ||= 'master.lock';
  my $store = get_store($host); # Don't take classes into account - not needed
  return 0 unless(exists($locks{"$host:$file"}));
  my $lockfh = delete($locks{"$host:$file"});
  unlink("$store/$file") if($remove);
  flock($lockfh, LOCK_UN);
  close($lockfh);
  return 1;
}
# Scan a store directory for existing backups, returning a hashref keyed by
# the decoded filesystem name.  Each entry records full/incremental backup
# points (file path or dataset/snapshot name) plus last_full,
# last_incremental and last_backup timestamps.  Dataset-based backups are
# discovered via `zfs list` when the store directory is itself a zfs
# filesystem, and are treated as full backups.
sub scan_for_backups($) {
  my %info = ();
  my $dir = shift;
  $info{last_full} = $info{last_incremental} = $info{last_backup} = 0;
  # Look for standard file based backups first
  opendir(D, $dir) || return \%info;
  foreach my $file (readdir(D)) {
    if($file =~ /^(\d+)\.([^\.]+)\.full$/) {
      my $whence = $1;
      my $fs = dir_decode($2);
      $info{$fs}->{full}->{$whence}->{'file'} = "$dir/$file";
      $info{$fs}->{last_full} = $whence if($whence > $info{$fs}->{last_full});
      $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
                                     $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
    }
    # BUG FIX: the dots around "incremental" were unescaped and matched any
    # character; they are now literal, mirroring the .full pattern above.
    elsif($file =~ /^(\d+)\.([^\.]+)\.incremental\.(\d+)$/) {
      my $whence = $1;
      my $fs = dir_decode($2);
      $info{$fs}->{incremental}->{$whence}->{'depends'} = $3;
      $info{$fs}->{incremental}->{$whence}->{'file'} = "$dir/$file";
      $info{$fs}->{last_incremental} = $whence if($whence > $info{$fs}->{last_incremental});
      $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
                                     $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
    }
  }
  closedir(D);
  # Now look for zfs based backups
  my $storefs;
  eval {
    $storefs = get_fs_from_mountpoint($dir);
  };
  return \%info if ($@);   # store is not a zfs mountpoint: file backups only
  my $rv = open(ZFSLIST, "__ZFS__ list -H -r -t snapshot $storefs |");
  return \%info unless $rv;
  while (<ZFSLIST>) {
      my @F = split(' ');
      my ($rawfs, $snap) = split('@', $F[0]);
      my ($whence) = ($snap =~ /(\d+)/);
      next unless $whence;
      my @fsparts = split('/', $rawfs);
      my $fs = fs_decode($fsparts[-1]);
      # Treat a dataset backup as a full backup from the point of view of the
      # backup lists
      $info{$fs}->{full}->{$whence}->{'snapshot'} = $snap;
      $info{$fs}->{full}->{$whence}->{'dataset'} = "$rawfs\@$snap";
      # Note - this field isn't set for file backups - we probably should do
      # this
      $info{$fs}->{full}->{$whence}->{'pretty_size'} = "$F[1]";
      $info{$fs}->{last_full} = $whence if ($whence >
          $info{$fs}->{last_full});
      $info{$fs}->{last_backup} = $whence if ($whence >
          $info{$fs}->{last_backup});
  }
  close(ZFSLIST);

  return \%info;
}
595
# Read the configuration now so everything below sees a populated %conf.
parse_config();
597
# Append a timestamped, printf-formatted message for $host to its
# configured logfile; falls back to STDERR when no logfile is configured
# or the logfile cannot be opened for append.
sub zetaback_log($$;@) {
  my ($host, $mess, @args) = @_;
  my $stamp = strftime(config_get($host, 'time_format'), localtime(time));
  my $logfile = config_get($host, 'logfile');
  my $out;
  $out = IO::File->new(">>$logfile") if(defined($logfile));
  $out ||= IO::File->new(">&STDERR");
  printf $out "%s: $mess", $stamp, @args;
  $out->close();
}
610
# Ask the remote agent (over ssh) to destroy one snapshot of a remote
# filesystem.  Args: host, zfs filesystem, snapshot name.  No-op when
# $snap is false.  The command's output/exit status is not checked.
sub zfs_remove_snap($$$) {
  my ($host, $fs, $snap) = @_;
  my $agent = config_get($host, 'agent');
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  print "Using custom ssh config file: $ssh_config\n" if($DEBUG);
  return unless($snap);
  print "Dropping $snap on $fs\n" if($DEBUG);
  # NOTE(review): $host/$fs/$snap are interpolated into a shell command;
  # values come from the local config and agent output, but confirm they
  # are shell-safe before widening their sources.
  `ssh $ssh_config $host $agent -z $fs -d $snap`;
}
621
# Lots of args.. internally called.
# Core backup driver used by the full/incremental/dataset wrappers.
# Args: host, remote fs, type ('f' full, 'i' incremental, 's' dataset),
# timestamp, local store dir, dump name, and (for 'i'/'s') base snapshot.
# Streams "agent -z fs ..." over ssh into either an (optionally gzipped)
# dump file or a local `zfs recv`, validates the result, and logs
# success/failure.  Dies (after logging) on any failure.
sub zfs_do_backup($$$$$$;$) {
  my ($host, $fs, $type, $point, $store, $dumpname, $base) = @_;
  my ($storefs, $encodedname);
  my $agent = config_get($host, 'agent');
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  print "Using custom ssh config file: $ssh_config\n" if($DEBUG);

  # compression is meaningless for dataset backups
  if ($type ne "s") {
    my $cl = config_get($host, 'compressionlevel');
    if ($cl >= 1 && $cl <= 9) {
        open(LBACKUP, "|gzip -$cl >$store/.$dumpname") ||
        die "zfs_full_backup: cannot create dump\n";
    } else {
        open(LBACKUP, ">$store/.$dumpname") ||
        die "zfs_full_backup: cannot create dump\n";
    }
  } else {
    # Dataset backup - pipe received filesystem to zfs recv
    eval {
      $storefs = get_fs_from_mountpoint($store);
    };
    if ($@) {
      # The zfs filesystem doesn't exist, so we have to work out what it
      # would be
      my $basestore = config_get($host, 'store');
      $basestore =~ s/\/?%h//g;
      $storefs = get_fs_from_mountpoint($basestore);
      $storefs="$storefs/$host";
    }
    $encodedname = fs_encode($dumpname);
    print STDERR "Receiving to zfs filesystem $storefs/$encodedname\n"
      if($DEBUG);
    zfs_create_intermediate_filesystems("$storefs/$encodedname");
    open(LBACKUP, "|__ZFS__ recv $storefs/$encodedname");
  }
  # Fork: the child execs the remote agent with stdout wired into LBACKUP;
  # the parent waits, then validates the produced dump/snapshot.
  eval {
    if(my $pid = fork()) {
      close(LBACKUP);
      waitpid($pid, 0);
      die "error: $?" if($?);
    }
    else {
      my @cmd = ('ssh', split(/ /, $ssh_config), $host, $agent, '-z', $fs);
      if ($type eq "i" || ($type eq "s" && $base)) {
        push @cmd, ("-i", $base);
      }
      if ($type eq "f" || $type eq "s") {
        push @cmd, ("-$type", $point);
      }
      # BUG FIX: these were "open ... || exit(-1)"; || binds to the filename
      # argument (always true), so a failed open was silently ignored.
      open STDIN, "/dev/null" or exit(-1);
      open STDOUT, ">&LBACKUP" or exit(-1);
      print STDERR "   => @cmd\n" if($DEBUG);
      exec { $cmd[0] } @cmd;
      print STDERR "$cmd[0] failed: $?\n";
      exit($?);
    }
    if ($type ne "s") {
      die "dump failed (zero bytes)\n" if(-z "$store/.$dumpname");
      rename("$store/.$dumpname", "$store/$dumpname") || die "cannot rename dump\n";
    } else {
      # Check everything is ok
      `__ZFS__ list $storefs/$encodedname`;
      die "dump failed (received snapshot $storefs/$encodedname does not exist)\n"
        if $?;
    }
  };
  if($@) {
    if ($type ne "s") {
        unlink("$store/.$dumpname");
    }
    chomp(my $error = $@);
    $error =~ s/[\r\n]+/ /gsm;
    zetaback_log($host, "FAILED[$error] $host:$fs $type\n");
    die "zfs_full_backup: failed $@";
  }
  my $size;
  if ($type ne "s") {
    my @st = stat("$store/$dumpname");
    $size = pretty_size($st[7]);
  } else {
    $size = `__ZFS__ get -Ho value used $storefs/$encodedname`;
    chomp $size;
  }
  zetaback_log($host, "SUCCESS[$size] $host:$fs $type\n");
}
711
# Ensure every ancestor of zfs filesystem $fs exists, creating any missing
# ones so a later `zfs recv` into $fs can succeed.
sub zfs_create_intermediate_filesystems($) {
  my ($fs) = @_;
  # Walk each '/' in the name (skipping a leading one) and test the prefix.
  my $pos = index($fs, '/', 1);
  while ($pos != -1) {
    my $ancestor = substr($fs, 0, $pos);
    `__ZFS__ list $ancestor 2>&1`;
    if ($?) {
      print STDERR "Creating intermediate zfs filesystem: $ancestor\n"
        if $DEBUG;
      `__ZFS__ create $ancestor`;
    }
    $pos = index($fs, '/', $pos + 1);
  }
}
725
# Take a full backup of $fs on $host, storing it under $store as
# "<timestamp>.<encoded-fs>.full" via the common backup driver.
sub zfs_full_backup($$$) {
  my ($host, $fs, $store) = @_;

  my $now = time();
  my $dumpname = join('.', $now, dir_encode($fs), 'full');

  zfs_do_backup($host, $fs, 'f', $now, $store, $dumpname);
}
736
# Take an incremental backup of $fs on $host against base timestamp $base,
# storing it under $store as "<timestamp>.<encoded-fs>.incremental.<base>".
# (An unused $agent config lookup was removed.)
sub zfs_incremental_backup($$$$) {
  my ($host, $fs, $base, $store) = @_;

  # Translate into a proper dumpname
  my $point = time();
  my $efs = dir_encode($fs);
  my $dumpname = "$point.$efs.incremental.$base";

  zfs_do_backup($host, $fs, 'i', $point, $store, $dumpname, $base);
}
748
# Take a dataset (zfs send/recv) backup of $fs on $host; the "dump name"
# is the snapshot spec "fs@timestamp".  (An unused $agent config lookup
# was removed.)
sub zfs_dataset_backup($$$$) {
  my ($host, $fs, $base, $store) = @_;

  my $point = time();
  my $dumpname = "$fs\@$point";

  zfs_do_backup($host, $fs, 's', $point, $store, $dumpname, $base);
}
758
# Apply the retention policy for one host's store: work out which backup
# points must be kept (the most recent backup and full, per-period
# retention bins, and any full a kept incremental depends on) and expunge
# everything else -- unless running neutered (-n).
sub perform_retention($$) {
  my ($host, $store) = @_;
  my $backup_info = scan_for_backups($store);
  my $retention = config_get($host, 'retention');
  my $now = time();

  if ($DEBUG) {
    print "Performing retention for $host\n";
  }

  foreach my $disk (sort keys %{$backup_info}) {
    my $info = $backup_info->{$disk};
    next unless(ref($info) eq 'HASH');
    my %must_save;

    if ($DEBUG) {
      print "   $disk\n";
    }

    # Get a list of all the full and incrementals, sorts newest to oldest
    my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
    @backup_points = sort { $b <=> $a } @backup_points;

    # We _cannot_ throw away _all_ our backups,
    # so save the most recent incremental and full no matter what
    push(@{$must_save{$backup_points[0]}}, "most recent backup");
    my @fulls = grep { exists($info->{full}->{$_}) } @backup_points;
    push(@{$must_save{$fulls[0]}}, "most recent full");

    # Process retention policy: "period[,count][;period[,count]...]"
    my @parts = split(/;/, $retention);
    my %retention_map;
    foreach (@parts) {
      my ($period, $amount) = split(/,/);
      if (!defined($amount)) {
        $amount = -1;   # -1 == keep everything within this period
      }
      $retention_map{$period} = $amount;
    }
    my @periods = sort { $a <=> $b } keys(%retention_map);
    my %backup_bins;
    foreach(@periods) {
      $backup_bins{$_} = ();
    }
    my $cutoff = $now - $periods[0];
    # Sort backups into time period sections
    foreach (@backup_points) {
      # @backup_points is in descending order (newest first)
      while ($_ <= $cutoff) {
        # Move to the next largest bin if the current backup is not in the
        # current bin. However, if there is no larger bin, then don't
        shift(@periods);
        if (@periods) {
          $cutoff = $now - $periods[0];
        } else {
          last;
        }
      }
      # Throw away all backups older than the largest time period specified
      if (!@periods) {
        last;
      }
      push(@{$backup_bins{$periods[0]}}, $_);
    }
    # Within each bin, keep $keep backups spread evenly across the bin.
    foreach (keys(%backup_bins)) {
      my $keep = $retention_map{$_}; # How many backups to keep
      if ($backup_bins{$_}) {
        my @backups = @{$backup_bins{$_}};
        my $total = @backups;  # How many backups we have
        # If we didn't specify how many to keep, keep them all
        if ($keep == -1) { $keep = $total };
        # If we have less backups than we should keep, keep them all
        if ($total < $keep) { $keep = $total };
        for (my $i = 1; $i <= $keep; $i++) {
          my $idx = int(($i * $total) / $keep) - 1;
          push(@{$must_save{$backups[$idx]}}, "retention policy - $_");
        }
      }
    }
    if ($DEBUG) {
      print "    => Backup bins:\n";
      foreach my $a (keys(%backup_bins)) {
        print "      => $a\n";
        foreach my $i (@{$backup_bins{$a}}) {
          my $trans = $now - $i;
          print "         => $i ($trans seconds old)";
          if (exists($must_save{$i})) { print " => keep" };
          print "\n";
        }
      }
    }

    # Look for dependencies: a kept incremental pins the full it's based on.
    foreach (@backup_points) {
      if(exists($info->{incremental}->{$_})) {
        print "   => $_ depends on $info->{incremental}->{$_}->{depends}\n" if($DEBUG);
        if (exists($must_save{$_})) {
          push(@{$must_save{$info->{incremental}->{$_}->{depends}}},
            "dependency");
        }
      }
    }

    my @removals = grep { !exists($must_save{$_}) } @backup_points;
    if($DEBUG) {
      my $tf = config_get($host, 'time_format');
      print "    => Candidates for removal:\n";
      foreach (@backup_points) {
        print "      => ". strftime($tf, localtime($_));
        print " ($_)";
        print " [". (exists($info->{full}->{$_}) ? "full":"incremental") ."]";
        if (exists($must_save{$_})) {
          my $reason = join(", ", @{$must_save{$_}});
          print " => keep ($reason)";
        } else {
          print " => remove";
        }
        print "\n";
      }
    }
    foreach (@removals) {
      # Initialized to '' so the debug print below is always well-defined;
      # exactly one of the two is filled in for a known backup point.
      my $filename = '';
      my $dataset = '';
      if(exists($info->{full}->{$_}->{file})) {
        $filename = $info->{full}->{$_}->{file};
      } elsif(exists($info->{incremental}->{$_}->{file})) {
        $filename = $info->{incremental}->{$_}->{file};
      } elsif(exists($info->{full}->{$_}->{dataset})) {
        $dataset = $info->{full}->{$_}->{dataset};
      } elsif(exists($info->{incremental}->{$_}->{dataset})) {
        $dataset = $info->{incremental}->{$_}->{dataset};
      } else {
        print "ERROR: We tried to expunge $host $disk [$_], but couldn't find it.\n";
      }
      # BUG FIX: this debug line was corrupted ("$(unknown)${dataset}");
      # restored to print whichever of file/dataset was found.
      print "    => expunging ${filename}${dataset}\n" if($DEBUG);
      unless($NEUTERED) {
        if ($filename) {
          # BUG FIX: unlink sets $!, not $? -- report the real error.
          unlink($filename) || print "ERROR: unlink $filename: $!\n";
        } elsif ($dataset) {
          `__ZFS__ destroy $dataset`;
          if ($?) {
            print "ERROR: zfs destroy $dataset: $?\n";
          }
        }
      }
    }
  }
}
908
# Fallback comparator for choose(): plain string comparison.
sub __default_sort($$) { return $_[0] cmp $_[1]; }
910    
# Present a numbered menu built from an array ref (elements) or a hash ref
# (keys, with values as display labels) and read a selection from stdin.
# Returns the single candidate immediately when only one exists.  Dies on
# a non-array/hash object.
# BUG FIX: an empty candidate list previously caused an infinite prompt
# loop (no selection can satisfy 1 <= sel < 1); it now dies instead.
sub choose($$;$) {
  my($name, $obj, $sort) = @_;
  $sort ||= \&__default_sort;
  my @list;
  my $hash;
  if(ref $obj eq 'ARRAY') {
    @list = sort { $sort->($a,$b); } (@$obj);
    map { $hash->{$_} = $_; } @list;
  }
  elsif(ref $obj eq 'HASH') {
    @list = sort { $sort->($a,$b); } (keys %$obj);
    $hash = $obj;
  }
  else {
    die "choose passed bad object: " . ref($obj) . "\n";
  }
  die "choose: nothing to choose from\n" unless(scalar(@list));
  return $list[0] if(scalar(@list) == 1);
  print "\n";
  my $i = 1;
  for (@list) {
    printf " %3d) $hash->{$_}\n", $i++;
  }
  my $selection = 0;
  while($selection !~ /^\d+$/ or
        $selection < 1 or
        $selection >= $i) {
    print "$name: ";
    chomp($selection = <>);
  }
  return $list[$selection - 1];
}
942
# Collect the chain of backup records needed to restore the backup at
# timestamp $ts: the record at $ts (full and/or incremental), then each
# ancestor an incremental points to via its 'depends' timestamp.
# Returned newest-first; callers reverse it for restore order.
sub backup_chain($$) {
  my ($info, $ts) = @_;
  my @chain;
  # Iterative walk (the original shape was recursive): at each step,
  # record a full backup at this timestamp if present, then follow the
  # incremental link if one exists.
  while(1) {
    push @chain, $info->{full}->{$ts} if(exists($info->{full}->{$ts}));
    last unless exists($info->{incremental}->{$ts});
    push @chain, $info->{incremental}->{$ts};
    $ts = $info->{incremental}->{$ts}->{depends};
  }
  return @chain;
}
953
# Resolve the zfs filesystem whose mountpoint is exactly $mountpoint by
# scanning `zfs list -H` output (first column: filesystem, last column:
# mountpoint).  Dies if the listing cannot be started or no filesystem
# matches.  __ZFS__ is substituted with the zfs binary path at install.
sub get_fs_from_mountpoint($) {
    my ($mountpoint) = @_;
    my $fs;
    # Lexical filehandle instead of the bareword global ZFSLIST; the
    # pipe-open failure is still fatal with the same message.
    open(my $zfslist, "__ZFS__ list -t filesystem -H |")
        or die "Unable to determine zfs filesystem for $mountpoint";
    while (<$zfslist>) {
        my @F = split(' ');
        if ($F[-1] eq $mountpoint) {
            $fs = $F[0];
            last;
        }
    }
    close($zfslist);
    die "Unable to determine zfs filesystem for $mountpoint" unless $fs;
    return $fs;
}
970
# Interactively restore a backup.  Candidates are narrowed by the -h
# (host), -z (zfs) and -t (timestamp) command-line filters; anything
# still ambiguous is resolved by prompting via choose().  The selected
# full+incremental chain is then replayed oldest-first through
# zfs_restore_part().  Returns without action when nothing matches.
sub perform_restore() {
  my %source;

  # Build %source{host}{disk} => [timestamps] across every configured
  # host section (skipping the 'default' section and class definitions).
  foreach my $host (grep { $_ ne "default" && $conf{$_}->{"type"} ne "class"}
      keys %conf) {
    # If -h was specified, we will skip this host if the arg isn't
    # an exact match or a pattern match
    if($HOST &&
       !(($HOST eq $host) ||
         ($HOST =~ /^\/(.*)\/$/ && $host =~ /$1/))) {
      next;
    }

    my $store = get_store($host);

    my $backup_info = scan_for_backups($store);
    foreach my $disk (sort keys %{$backup_info}) {
      my $info = $backup_info->{$disk};
      next unless(ref($info) eq 'HASH');
      next
        if($ZFS &&      # if the pattern was specified it could
           !($disk eq $ZFS ||        # be a specific match or a
             ($ZFS =~ /^\/(.+)\/$/ && $disk =~ /$1/))); # regex
      # We want to see this one
      my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
      my @source_points;
      # Without -t, every point qualifies; with -t, only an exact match.
      foreach (@backup_points) {
        push @source_points, $_ if(!$TIMESTAMP || $TIMESTAMP == $_)
      }
      if(@source_points) {
        $source{$host}->{$disk} = \@source_points;
      }
    }
  }

  if(! keys %source) {
    print "No matching backups found\n";
    return;
  }

  # Here goes the possibly interactive dialog
  my $host = choose("Restore from host",  [keys %source]);
  my $disk = choose("Restore from ZFS", [keys %{$source{$host}}]);

  # Times are special.  We build a human readable form and use a numerical
  # sort function instead of the default lexical one.
  my %times;
  my $tf = config_get($host, 'time_format');
  map { $times{$_} = strftime($tf, localtime($_)); } @{$source{$host}->{$disk}};
  my $timestamp = choose("Restore as of timestamp", \%times,
                         sub { $_[0] <=> $_[1]; });

  # backup_chain() yields newest-first; reverse so the full backup is
  # applied before its incrementals.
  my $store = get_store($host);
  my $backup_info = scan_for_backups($store);
  my @backup_list = reverse backup_chain($backup_info->{$disk}, $timestamp);

  # Destination defaults to the original host/filesystem unless set via
  # -rhost / -rzfs, or overridden interactively here (empty = default).
  if(!$RESTORE_HOST) {
    print "Restore to host [$host]:";
    chomp(my $input = <>);
    $RESTORE_HOST = length($input) ? $input : $host;
  }
  if(!$RESTORE_ZFS) {
    print "Restore to zfs [$disk]:";
    chomp(my $input = <>);
    $RESTORE_ZFS = length($input) ? $input : $disk;
  }

  # show intentions
  print "Going to restore:\n";
  print "\tfrom: $host\n";
  print "\tfrom: $disk\n";
  print "\t  at: $timestamp [" . strftime($tf, localtime($timestamp)) . "]\n";
  print "\t  to: $RESTORE_HOST\n";
  print "\t  to: $RESTORE_ZFS\n";
  print "\n";

  # Each part's pipeline exit status ($?) is recorded in {success};
  # NOTE(review): 0 means success there, and the loop continues even if
  # an earlier part failed -- confirm that is intended.
  foreach(@backup_list) {
    $_->{success} = zfs_restore_part($RESTORE_HOST, $RESTORE_ZFS, $_->{file}, $_->{dataset}, $_->{depends});
  }
}
1051
# Stream one backup part to the destination's zfs receive.  The source
# is either a gzip'd dump file ($file) or a local zfs dataset ($dataset,
# sent with `zfs send`); the sink is the remote agent (when $host is a
# configured host) or a raw `zfs recv` over ssh.
#
#   $host    - destination host
#   $fs      - destination zfs filesystem
#   $file    - local backup file to pipe (takes precedence over $dataset)
#   $dataset - local dataset to `zfs send`
#   $dep     - optional base timestamp passed to the agent (-b) for
#              incremental receives
#
# Returns 1 when given neither a file nor a dataset; otherwise returns
# $? from the pipeline (0 = success).  With $NEUTERED set, commands are
# only printed (under -d), never executed.
sub zfs_restore_part($$$$;$) {
  my ($host, $fs, $file, $dataset, $dep) = @_;
  unless ($file || $dataset) {
    print STDERR "=> No dataset or filename given to restore. Bailing out.";
    return 1;
  }
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  print "Using custom ssh config file: $ssh_config\n" if($DEBUG);
  # Configured hosts go through the zetaback agent; anything else gets a
  # plain zfs recv.
  my $command;
  if(exists($conf{$host})) {
    my $agent = config_get($host, 'agent');
    $command = "$agent -r -z $fs";
    $command .= " -b $dep" if($dep);
  }
  else {
    $command = "__ZFS__ recv $fs";
  }
  if ($file) {
    print " => piping $file to $command\n" if($DEBUG);
    print "gzip -dfc $file | ssh $ssh_config $host $command\n" if ($DEBUG && $NEUTERED);
  } elsif ($dataset) {
    print " => piping $dataset to $command using zfs send\n" if ($DEBUG);
    print "zfs send $dataset | ssh $ssh_config $host $command\n" if ($DEBUG && $NEUTERED);
  }
  unless($NEUTERED) {
    # NOTE(review): these pipe opens are unchecked; a failed open only
    # surfaces as a zero-length copy and whatever close() leaves in $?.
    if ($file) {
      open(DUMP, "gzip -dfc $file |");
    } elsif ($dataset) {
      open(DUMP, "__ZFS__ send $dataset |");
    }
    # Copy DUMP -> RECEIVER in $BLOCKSIZE chunks; a short write dies out
    # of the eval so both handles still get closed below.
    eval {
      open(RECEIVER, "| ssh $ssh_config $host $command");
      my $buffer;
      while(my $len = sysread(DUMP, $buffer, $BLOCKSIZE)) {
        if(syswrite(RECEIVER, $buffer, $len) != $len) {
          die "$!";
        }
      }
    };
    close(DUMP);
    close(RECEIVER);
  }
  # $? here reflects the last close()d pipe (the ssh receiver).
  return $?;
}
1097
# Print a human-readable summary line for one backup point: formatted
# time, raw timestamp, type (FULL/INCR) and size; with --files set, a
# second line naming the backing file or dataset.
sub pretty_print_backup($$$) {
  my ($info, $host, $point) = @_;
  my $tf = config_get($host, 'time_format');
  print "\t" . strftime($tf, localtime($point)) . " [$point] ";
  if(exists($info->{full}->{$point})) {
    my $full = $info->{full}->{$point};
    if($full->{file}) {
      # File-backed full: size comes from stat()ing the file on disk.
      my $bytes = (stat($full->{file}))[7];
      print "FULL " . pretty_size($bytes);
      print "\n\tfile: $full->{file}" if($SHOW_FILENAMES);
    } elsif($full->{dataset}) {
      # Dataset-backed full: a pre-formatted size was stored at scan time.
      print "FULL $full->{pretty_size}";
      print "\n\tdataset: $full->{dataset}" if($SHOW_FILENAMES);
    }
  } else {
    # Incrementals are always file-backed and name their base timestamp.
    my $incr = $info->{incremental}->{$point};
    my $bytes = (stat($incr->{file}))[7];
    print "INCR from [$incr->{depends}] " . pretty_size($bytes);
    print "\n\tfile: $incr->{file}" if($SHOW_FILENAMES);
  }
  print "\n";
}
1119
# Display backups for $host found under $store, optionally filtered by
# $diskpat (exact name or /regex/).  Output detail is driven by the
# global mode flags:
#   $SUMMARY_VIOLATORS - only disks violating their backup/full
#                        intervals, showing their most recent backups
#   $SUMMARY / $SUMMARY_EXT - per-point detail ($SUMMARY_EXT adds
#                        "Last Full"/"Last Incr" header lines)
#   $ARCHIVE           - full listing, then interactively offers to move
#                        the listed files/datasets into the archive
sub show_backups($$$) {
  my ($host, $store, $diskpat) = @_;
  my $backup_info = scan_for_backups($store);
  my $tf = config_get($host, 'time_format');
  my (@files, @datasets);
  foreach my $disk (sort keys %{$backup_info}) {
    my $info = $backup_info->{$disk};
    next unless(ref($info) eq 'HASH');
    next
      if($diskpat &&      # if the pattern was specified it could
         !($disk eq $diskpat ||        # be a specific match or a
           ($diskpat =~ /^\/(.+)\/$/ && $disk =~ /$1/))); # regex

    my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
    @backup_points = sort { $a <=> $b } @backup_points;
    # Unless the whole history is needed, show only the newest point.
    @backup_points = (pop @backup_points) unless ($ARCHIVE || $SUMMARY_EXT || $SUMMARY_VIOLATORS);

    # Quick short-circuit in the case of retention violation checks
    if($SUMMARY_VIOLATORS) {
      if(time() > $info->{last_full} + config_get($host, 'full_interval') ||
         time() > $info->{last_backup} + config_get($host, 'backup_interval')) {
        print "$host:$disk\n";
        pretty_print_backup($info, $host, $info->{last_full});
        # Only print the last backup if it isn't the same as the last full
        if ($info->{last_full} != $info->{last_backup}) {
            pretty_print_backup($info, $host, $info->{last_backup});
        }
      }
      next;
    }

    # We want to see this one
    print "$host:$disk\n";
    next unless($SUMMARY || $SUMMARY_EXT || $ARCHIVE);
    if($SUMMARY_EXT) {
      print "\tLast Full: ". ($info->{last_full} ? strftime($tf, localtime($info->{last_full})) : "Never") . "\n";
      if($info->{last_full} < $info->{last_incremental}) {
        print "\tLast Incr: ". strftime($tf, localtime($info->{last_incremental})). "\n";
      }
    }
    # Collect backing files/datasets as we print, for the archive pass.
    foreach (@backup_points) {
      pretty_print_backup($info, $host, $_);
      if(exists($info->{full}->{$_}->{file})) {
        push @files, $info->{full}->{$_}->{file};
      } elsif(exists($info->{incremental}->{$_}->{file})) {
        push @files, $info->{incremental}->{$_}->{file};
      } elsif(exists($info->{full}->{$_}->{dataset})) {
        push @datasets, $info->{full}->{$_}->{dataset}
      }
    }
    print "\n";
  }
  if($ARCHIVE && (scalar(@files) || scalar(@datasets))) {
    # Confirm before moving anything; keep asking until y/n/yes/no.
    print "\nAre you sure you would like to archive ".scalar(@files).
      " file(s) and ".scalar(@datasets)." dataset(s)? ";
    while(($_ = <>) !~ /(?:y|n|yes|no)$/i) {
      print "\nAre you sure you would like to archive ".scalar(@files).
        " file(s) and ".scalar(@datasets)." dataset(s)? ";
    }
    if(/^y/i) {
      if (@files) {
        my $archive = config_get($host, 'archive');
        $archive =~ s/%h/$host/g;
        if(! -d $archive) {
          # BUGFIX: 'mkdir $archive || die ...' parsed as
          # mkdir($archive || die ...), so the die was unreachable and
          # mkdir failures were silent; low-precedence 'or' fixes this.
          mkdir $archive or die "Cannot mkdir($archive)\n";
        }
        foreach my $file (@files) {
          (my $afile = $file) =~ s/^$store/$archive/;
          move($file, $afile) || print "Error archiving $file: $!\n";
        }
      }
      if (@datasets) {
        my $archive = config_get($host, 'archive');
        my $storefs = get_fs_from_mountpoint($store);
        # Dataset archives land under <archive-root>/<host>; strip the
        # %h token to locate the root archive filesystem.
        (my $basearchive = $archive) =~ s/\/?%h//g;
        my $basearchivefs;
        eval {
          $basearchivefs = get_fs_from_mountpoint($basearchive);
        };
        die "Unable to find archive filesystem. The archive directory must be the root of a zfs filesystem to archive datasets." if $@;
        my $archivefs = "$basearchivefs/$host";
        `__ZFS__ create $archivefs`; # We don't care if this fails
        my %seen = ();
        foreach my $dataset (@datasets) {
          $dataset =~ s/@.*$//; # Only rename filesystems, not snapshots
          next if $seen{$dataset}++; # Only rename a filesystem once
          (my $adataset = $dataset) =~ s/^$storefs/$archivefs/;
          `__ZFS__ rename $dataset $adataset`;
          if ($?) {
            print "Error archiving $dataset\n";
          }
        }
      }
    }
  }
}
1216
# Plan and execute backups for every eligible filesystem on $host,
# optionally filtered by $diskpat (exact or /regex/).  The remote agent
# is asked for its filesystem/snapshot list over ssh; for each disk we
# pick no/incremental/full/dataset backup based on configured intervals
# and which __zb_* snapshots survive on both sides.  After any action
# the outer loop restarts with a fresh listing ($took_action); per-disk
# and .list locks keep concurrent zetaback runs from colliding.
sub plan_and_run($$) {
  my ($host, $diskpat) = @_;
  my $store;
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  my %suppress;
  print "Planning '$host'\n" if($DEBUG);
  my $agent = config_get($host, 'agent');
  my $took_action = 1;
  while($took_action) {
    $took_action = 0;
    my @disklist;

    # We need a lock for the listing.
    return unless(lock($host, ".list"));

    # Get list of zfs filesystems from the agent; stderr is temporarily
    # redirected away so ssh noise doesn't pollute our output.
    open(SILENT, ">&", \*STDERR);
    close(STDERR);
    my $rv = open(ZFSLIST, "ssh $ssh_config $host $agent -l |");
    open(STDERR, ">&", \*SILENT);
    close(SILENT);
    # NOTE(review): this 'next' exits the while loop with the .list lock
    # still held -- confirm lock() tolerates/expires stale locks.
    next unless $rv;
    @disklist = grep { chomp } (<ZFSLIST>);
    close(ZFSLIST);

    if ($DEBUG) {
      print " => Filesystems for $host (zetaback_agent -l output)\n";
      foreach my $diskline (@disklist) {
        print "    $diskline\n";
      }
    }

    foreach my $diskline (@disklist) {
      chomp($diskline);
      # Agent lines look like: "<fs> [snap1,snap2,...] {class}" with the
      # class part optional.
      # BUGFIX: the class capture used to be ([^}]*}) which pulled the
      # closing brace into $3 (class came back as "foo}"); it is now
      # ([^}]*)} so $class is the bare class name.
      next unless($diskline =~ /^(\S+) \[([^\]]*)\](?: {([^}]*)})?/);
      my $diskname = $1;
      my %snaps;
      map { $snaps{$_} = 1 } (split(/,/, $2));
      my $class = $3;
      $store = get_store($host, $class);

      # We've just done this.
      next if($suppress{"$host:$diskname"});
      # If we are being selective (via -z) now is the time.
      next
        if($diskpat &&          # if the pattern was specified it could
           !($diskname eq $diskpat ||        # be a specific match or a
             ($diskpat =~ /^\/(.+)\/$/ && $diskname =~ /$1/))); # regex

      print " => Scanning '$store' for old backups of '$diskname'.\n" if($DEBUG);

      # Make directory on demand
      my $backup_info = scan_for_backups($store);
      # That gave us info on all backups, we just want this disk
      $backup_info = $backup_info->{$diskname} || {};

      # Should we do a backup?
      my $backup_type = 'no';
      if(time() > $backup_info->{last_backup} + config_get($host,
          'backup_interval', $class)) {
        $backup_type = 'incremental';
      }
      if(time() > $backup_info->{last_full} + config_get($host,
          'full_interval', $class)) {
        $backup_type = 'full';
      }
      # If we want an incremental, but have no full, then we need to upgrade to full
      if($backup_type eq 'incremental') {
        my $have_full_locally = 0;
        # For each local full backup, see if the full backup still exists on the other end.
        foreach (keys %{$backup_info->{'full'}}) {
          $have_full_locally = 1 if(exists($snaps{'__zb_full_' . $_}));
        }
        $backup_type = 'full' unless($have_full_locally);
      }
      $backup_type = 'full' if($FORCE_FULL);
      $backup_type = 'incremental' if($FORCE_INC);
      $backup_type = 'dataset' if(config_get($host, 'dataset_backup', $class)
        eq 1 && $backup_type ne 'no');

      print " => doing $backup_type backup\n" if($DEBUG);
      # We need to drop a __zb_base snap or a __zb_incr snap before we proceed
      unless($NEUTERED || $backup_type eq 'no') {
        # attempt to lock this action, if it fails, skip -- someone else is working it.
        next unless(lock($host, dir_encode($diskname), 1));
        unlock($host, '.list');

        if($backup_type eq 'full') {
          eval { zfs_full_backup($host, $diskname, $store); };
          if ($@) {
            chomp(my $err = $@);
            print " => failure $err\n";
          }
          else {
            # Unless there was an error backing up, remove all the other full snaps
            foreach (keys %snaps) {
              zfs_remove_snap($host, $diskname, $_) if(/^__zb_full_(\d+)/)
            }
          }
          $took_action = 1;
        }
        if($backup_type eq 'incremental') {
          eval {
            zfs_remove_snap($host, $diskname, '__zb_incr') if($snaps{'__zb_incr'});
            # Find the newest full from which to do an incremental (NOTE: reverse numeric sort)
            my @fulls = sort { $b <=> $a } (keys %{$backup_info->{'full'}});
            zfs_incremental_backup($host, $diskname, $fulls[0], $store);
          };
          if ($@) {
            chomp(my $err = $@);
            print " => failure $err\n";
          }
          else {
            $took_action = 1;
          }
        }
        if($backup_type eq 'dataset') {
          my @backups = sort { $b <=> $a } (keys %{$backup_info->{'full'}});
          eval { zfs_dataset_backup($host, $diskname, $backups[0], $store); };
          if ($@) {
            chomp(my $err = $@);
            print " => failure $err\n";
          }
          else {
            # Unless there was an error backing up, remove all the other dset snaps
            foreach (keys %snaps) {
              zfs_remove_snap($host, $diskname, $_) if(/^__zb_dset_(\d+)/)
            }
          }
          $took_action = 1;
        }
        unlock($host, dir_encode($diskname), 1);
      }
      $suppress{"$host:$diskname"} = 1;
      # After a successful action, restart with a fresh agent listing.
      last if($took_action);
    }
    unlock($host, '.list');
  }
}
1357
# Main dispatch: -r runs the interactive restore; otherwise iterate over
# every configured host section (skipping 'default' and class entries)
# and perform whichever list/summary/backup/expunge modes were requested.
if($RESTORE) {
  perform_restore();
}
else {
  foreach my $host (grep { $_ ne "default" && $conf{$_}->{"type"} ne "class"}
      keys %conf) {
    # If -h was specified, we will skip this host if the arg isn't
    # an exact match or a pattern match
    if($HOST &&
       !(($HOST eq $host) ||
         ($HOST =~ /^\/(.*)\/$/ && $host =~ /$1/))) {
      next;
    }

    # Skip if the host is marked as 'offline' and we are not listing backups
    if (config_get($host, 'offline') == 1 &&
        !$LIST && !$SUMMARY && !$SUMMARY_EXT && !$ARCHIVE) {
      next;
    }

    # TODO - remove once class functionality is completed
    my $store = get_store($host, '');

    # The modes are not mutually exclusive: listing, backing up and
    # expunging can all run for the same host in one invocation.
    if($LIST || $SUMMARY || $SUMMARY_EXT || $SUMMARY_VIOLATORS || $ARCHIVE) {
      show_backups($host, $store, $ZFS);
    }
    if($BACKUP) {
      plan_and_run($host, $ZFS);
    }
    if($EXPUNGE) {
      perform_retention($host, $store);
    }
  }
}

exit 0;
1394
1395 =pod
1396
1397 =head1 FILES
1398
1399 =over
1400
1401 =item zetaback.conf
1402
1403 The main zetaback configuration file.  The location of the file can be
1404 specified on the command line with the -c flag.  The prefix of this
1405 file may also be specified as an argument to the configure script.
1406
1407 =back
1408
1409 =head1 SEE ALSO
1410
1411 zetaback_agent(1)
1412
1413 =cut
Note: See TracBrowser for help on using the browser.