root/trunk/tools/fast.dump.and.restore/fast.restore

Revision 262, 20.3 kB (checked in by depesz, 3 years ago)

some cleanup, grouping, added timing information

  • Property svn:executable set to *
#!/usr/bin/env perl
package main;
use strict;
use warnings;
my $program = Omni::Program::Pg::FastRestore->new();
$program->run();
exit;

package Omni::Program::Pg::FastRestore;
use strict;
use warnings;
use Carp qw( croak carp );
use English qw( -no_match_vars );
use Getopt::Long qw( :config no_ignore_case );
use Data::Dumper;
use Cwd qw( abs_path );
use Pod::Usage;
use POSIX qw( :sys_wait_h );
use File::Temp qw( tempfile tempdir );
use POSIX qw( strftime );
use Time::HiRes qw( time );

our %killed_pids = ();

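# SIGCHLD handler: reaps every exited worker without blocking and remembers its
# exit status in %killed_pids, so the parent loop in process_in_parallel() can
# detect failed workers.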
sub REAPER {
    my $child;
    while ( ( $child = waitpid( -1, WNOHANG ) ) > 0 ) {
        $killed_pids{ $child } = $CHILD_ERROR;
    }
    $SIG{ 'CHLD' } = \&REAPER;
    return;
}

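# Top-level driver: creates a temporary work directory and runs all restore
# phases in order (schema listing, base schema, data load, size gathering,
# indexes/constraints, foreign keys, triggers/ACLs), logging the start and end
# of each phase.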
sub load_dump {
    my $self = shift;
    $self->{ 'tmpdir' } = tempdir(
        'fast.restore.XXXXXXXX',
        'TMPDIR'  => 1,
        'CLEANUP' => 1,
    );
    $self->log( 'Starting get_list_from_schema' );
    $self->get_list_from_schema();
    $self->log( 'get_list_from_schema finished.' );

    $self->log( 'Starting load_base_schema' );
    $self->load_base_schema();
    $self->log( 'load_base_schema finished.' );

    $self->log( 'Starting load_data' );
    $self->load_data();
    $self->log( 'load_data finished.' );

    $self->log( 'Starting get_tables_size' );
    $self->get_tables_size();
    $self->log( 'get_tables_size finished.' );

    $self->log( 'Starting get_indexes_size' );
    $self->get_indexes_size();
    $self->log( 'get_indexes_size finished.' );

    $self->log( 'Starting get_fkeys_size' );
    $self->get_fkeys_size();
    $self->log( 'get_fkeys_size finished.' );

    $self->log( 'Starting create_constraints_and_indexes' );
    $self->create_constraints_and_indexes();
    $self->log( 'create_constraints_and_indexes finished.' );

    $self->log( 'Starting create_foreign_keys' );
    $self->create_foreign_keys();
    $self->log( 'create_foreign_keys finished.' );

    $self->log( 'Starting finish_schema' );
    $self->finish_schema();
    $self->log( 'finish_schema finished.' );

    return;
}

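# Prints a single, sprintf-formatted log line prefixed with a
# millisecond-resolution timestamp.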
sub log {
    my $self = shift;
    my ( $format, @args ) = @_;
    $format =~ s/\s*\z//;

    my $time         = time();
    my $date_time    = strftime( '%Y-%m-%d %H:%M:%S', localtime $time );
    my $milliseconds = ( $time * 1000 ) % 1000;

    my $time_stamp = sprintf "%s.%03u", $date_time, $milliseconds;
    my $msg = sprintf $format, @args;

    printf "%s : %s\n", $time_stamp, $msg;
    return;
}

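# Reads table sizes from the restored database (pg_relation_size) into
# $self->{'size'}{$schema}{$table}; used later to order constraint and index
# creation.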
sub get_tables_size {
    my $self = shift;
    my $data = $self->psql( "SELECT n.nspname, c.relname, pg_relation_size( c.oid ) FROM pg_class c JOIN pg_namespace n on c.relnamespace = n.oid where c.relkind = 'r' and c.relname !~ '^pg_'" );
    for my $i ( @{ $data } ) {
        $self->{ 'size' }->{ $i->[ 0 ] }->{ $i->[ 1 ] } = $i->[ 2 ];
    }
    return;
}

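# Reads the dump's index.sizes file (tab-separated: schema, index name, size)
# into the same size hash.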
sub get_indexes_size {
    my $self = shift;
    open my $fh, '<', 'index.sizes' or croak( "Cannot open index.sizes in dump: $OS_ERROR\n" );
    while ( my $l = <$fh> ) {
        $l =~ s/\s*\z//;
        my @c = split /\t/, $l;
        $self->{ 'size' }->{ $c[ 0 ] }->{ $c[ 1 ] } = $c[ 2 ];
    }
    close $fh;
    return;
}

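# Reads the dump's fkeys.ordering file (tab-separated: schema, fkey name, the
# two tables involved, size) into $self->{'fkeys'}; the size is later used to
# create the largest foreign keys first.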
sub get_fkeys_size {
    my $self = shift;
    open my $fh, '<', 'fkeys.ordering' or croak( "Cannot open fkeys.ordering in dump: $OS_ERROR\n" );
    while ( my $l = <$fh> ) {
        $l =~ s/\s*\z//;
        my @c = split /\t/, $l;
        $self->{ 'fkeys' }->{ $c[ 0 ] }->{ $c[ 1 ] } = {
            'tables' => [ sort ( $c[ 2 ], $c[ 3 ] ) ],
            'size'   => $c[ 4 ],
        };
    }
    close $fh;
    return;
}

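# Loads all data files in parallel: each worker process streams one file into
# psql, largest files first.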
sub load_data {
    my $self = shift;

    $self->find_data_files_to_load();
    $self->process_in_parallel(
        'next_data' => sub { my $x = shift @{ $self->{ 'files' } }; return ( $x, $x ); },
        'show_progress' => sub { return $self->show_progress_for_data_files(); },
        'worker'        => sub { return $self->process_data_file( @_ ) },
    );
    return;
}

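# Creates all foreign keys, largest first, using up to --fkey-jobs parallel
# workers; the regular jobs setting is temporarily swapped out for the
# duration of this phase.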
sub create_foreign_keys {
    my $self = shift;
    my @fk_lines = grep { 'FK' eq $_->{ 'type' } } @{ $self->{ 'list' } };
    return if 0 == scalar @fk_lines;

    for my $fk ( @fk_lines ) {
        my @words = split /\s+/, $fk->{ 'line' };
        my ( $schema, $fkey_name ) = @words[ 5, 6 ];
        croak( "There is no meta info for fkey $schema.$fkey_name\n" ) unless $self->{ 'fkeys' }->{ $schema }->{ $fkey_name };
        my $M = $self->{ 'fkeys' }->{ $schema }->{ $fkey_name };
        @{ $fk }{ keys %{ $M } } = values %{ $M };
        $fk->{ 'name' } = $schema . '.' . $fkey_name;
    }
    my @sorted = sort { $b->{ 'size' } <=> $a->{ 'size' } } @fk_lines;
    $self->{ 'fkeys_list' } = \@sorted;

    my $t = $self->{ 'jobs' };
    $self->{ 'jobs' } = $self->{ 'fkey-jobs' };
    $self->process_in_parallel(
        'next_data' => sub { my $x = shift @{ $self->{ 'fkeys_list' } }; return unless defined $x; return ( $x->{ 'name' }, $x ) },
        'show_progress' => sub { return $self->show_progress_for_fkeys(); },
        'worker'        => sub { return $self->process_fkey( @_ ) },
    );
    $self->{ 'jobs' } = $t;
    return;
}

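# Creates constraints and indexes in parallel, each through its own pg_restore
# call, in the order prepared by make_list_of_constraints_and_indexes().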
sub create_constraints_and_indexes {
    my $self = shift;
    $self->make_list_of_constraints_and_indexes();
    return unless defined $self->{ 'ic_list' };
    $self->process_in_parallel(
        'next_data' => sub { my $x = shift @{ $self->{ 'ic_list' } }; return unless defined $x; return ( $x->{ 'name' }, $x ) },
        'show_progress' => sub { return $self->show_progress_for_ic_files(); },
        'worker'        => sub { return $self->pg_restore( \@_, 1 ) },
    );
    return;
}

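# Builds $self->{'ic_list'}: all CONSTRAINT entries (largest first) followed
# by all INDEX entries (largest first), attaching to each a size taken from
# the previously gathered size hash.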
sub make_list_of_constraints_and_indexes {
    my $self = shift;
    my @index_lines;
    my @constraint_lines;
    for my $i ( @{ $self->{ 'list' } } ) {
        push @index_lines,      $i if $i->{ 'type' } eq 'INDEX';
        push @constraint_lines, $i if $i->{ 'type' } eq 'CONSTRAINT';
    }
    return if ( 0 == scalar @index_lines ) && ( 0 == scalar @constraint_lines );
    for my $i ( @index_lines, @constraint_lines ) {
        my @c = split /\s+/, $i->{ 'line' };
        my ( $schema, $name ) = @c[ 4, 5 ];
        $i->{ 'name' } = $schema . '.' . $name;
        if ( $self->{ 'size' }->{ $schema } ) {
            if ( $self->{ 'size' }->{ $schema }->{ $name } ) {
                $i->{ 'size' } = $self->{ 'size' }->{ $schema }->{ $name };
            }
            else {

                # That would be pretty interesting to get in here
                my $count = 0;
                my $sum   = 0;
                for my $j ( values %{ $self->{ 'size' }->{ $schema } } ) {
                    $count++;
                    $sum += $j;
                }
                $i->{ 'size' } = $sum / $count;    # Just an estimate, I can't figure out how we'd get in here anyway.
            }
        }
        else {

            # That would be pretty interesting to get in here, too
            $i->{ 'size' } = 0;
        }
    }
    my @all_lines = ();
    push @all_lines, sort { $b->{ 'size' } <=> $a->{ 'size' } } @constraint_lines;
    push @all_lines, sort { $b->{ 'size' } <=> $a->{ 'size' } } @index_lines;
    $self->{ 'ic_list' } = \@all_lines;
    return;
}

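# The three show_progress_* subs below print a single, carriage-return
# refreshed status line: how many workers are currently running and how many
# items are still queued. The first call only records the total item count.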
sub show_progress_for_data_files {
    my $self = shift;
    unless ( defined $self->{ 'files_count' } ) {
        $self->{ 'files_count' } = scalar @{ $self->{ 'files' } };
        return;
    }
    my $workers  = scalar keys %{ $self->{ 'kids' } };
    my $in_queue = scalar @{ $self->{ 'files' } };
    if (   ( 0 == $workers )
        && ( 0 == $in_queue ) )
    {
        print "\n";
        return;
    }
    printf "%d data files loading. %d more to load. (total %d files to be processed).   \r", $workers, $in_queue, $self->{ 'files_count' };
    return;
}

sub show_progress_for_fkeys {
    my $self = shift;
    unless ( defined $self->{ 'fkeys_count' } ) {
        $self->{ 'fkeys_count' } = scalar @{ $self->{ 'fkeys_list' } };
        return;
    }
    my $workers  = scalar keys %{ $self->{ 'kids' } };
    my $in_queue = scalar @{ $self->{ 'fkeys_list' } };
    if (   ( 0 == $workers )
        && ( 0 == $in_queue ) )
    {
        print "\n";
        return;
    }
    printf "%d fkeys loading. %d more to load. (total %d fkeys to be created).   \r", $workers, $in_queue, $self->{ 'fkeys_count' };
    return;
}

sub show_progress_for_ic_files {
    my $self = shift;
    unless ( defined $self->{ 'ic_count' } ) {
        $self->{ 'ic_count' } = scalar @{ $self->{ 'ic_list' } };
        return;
    }
    my $workers  = scalar keys %{ $self->{ 'kids' } };
    my $in_queue = scalar @{ $self->{ 'ic_list' } };
    if (   ( 0 == $workers )
        && ( 0 == $in_queue ) )
    {
        print "\n";
        return;
    }
    printf "%d index/constraint files loading. %d more to load. (total %d files to be processed).   \r", $workers, $in_queue, $self->{ 'ic_count' };
    return;
}

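# Worker: streams one data file into psql, decompressing it first with the
# configured --compressor (or just cat-ing it), and exits non-zero if the
# pipeline fails so the master can abort.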
sub process_data_file {
    my $self     = shift;
    my $datafile = shift;

    my @cat = ();
    if ( $self->{ 'compressor' } ) {
        push @cat, $self->{ 'compressor' }, '-dc';
    }
    else {
        push @cat, 'cat';
    }
    push @cat, $datafile;

    my @psql = ( $self->{ 'psql' }, '-qAtX' );

    my $cat_cmd  = join ' ', map { quotemeta $_ } @cat;
    my $psql_cmd = join ' ', map { quotemeta $_ } @psql;

    my $full_cmd = join ' | ', $cat_cmd, $psql_cmd;

    my $return = system $full_cmd;
    exit 1 if $return;
    return;
}

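# Worker: extracts the SQL for one foreign key via pg_restore, wraps it in a
# transaction that locks both involved tables in SHARE ROW EXCLUSIVE mode, and
# runs it through psql.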
sub process_fkey {
    my $self = shift;
    my $fkey = shift;

    my $output = $self->pg_restore( [ $fkey ], 0 );

    my @sql_lines = ();
    push @sql_lines, "BEGIN;";
    push @sql_lines, "LOCK TABLE ONLY $_ IN SHARE ROW EXCLUSIVE MODE;" for @{ $fkey->{ 'tables' } };
    push @sql_lines, $output . "";
    push @sql_lines, "COMMIT;";

    $self->psql( join("\n", @sql_lines ) );

    return;
}

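# Generic parallel runner: keeps up to $self->{'jobs'} forked workers busy,
# fetching items with the next_data callback, reporting progress via
# show_progress, and starting no new work once any worker exits non-zero.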
sub process_in_parallel {
    my $self   = shift;
    my %args   = @_;
    my $f_next = $args{ 'next_data' };
    my $f_show = $args{ 'show_progress' };
    my $f_work = $args{ 'worker' };

    # Initialize progress info
    $f_show->();

    $SIG{ 'CHLD' } = \&REAPER;

    my $alert = 0;
    my $kids  = {};
    $self->{ 'kids' } = $kids;
    while ( 1 ) {
        my @pids = keys %killed_pids;
        for my $killed ( @pids ) {
            my $rc    = delete $killed_pids{ $killed };
            my $label = delete $kids->{ $killed };
            next unless $rc;
            $alert = 1;
            print "\nGot non-zero return from one of the workers ($label). Abort.\n";
        }
        while ( $self->{ 'jobs' } > scalar keys %{ $kids } ) {
            last if $alert;
            my ( $label, $data ) = $f_next->();
            last unless defined $data;

            my $pid = fork();
            croak "cannot fork" unless defined $pid;
            if ( $pid == 0 ) {

                # It's the worker process.
                delete $SIG{ 'CHLD' };
                $f_work->( $data );
                exit;
            }

            # It's the master.
            $kids->{ $pid } = $label;
        }

        $f_show->();
        last if 0 == scalar keys %{ $kids };
        sleep 60;    # sleep will get interrupted when a child exits, and then the loop will repeat.
    }
    return;
}

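# Scans the dump directory for data.<schema>.<table>.<n>.dump files and queues
# them for loading, biggest file first.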
sub find_data_files_to_load {
    my $self = shift;
    my $dir;

    croak( 'Cannot opendir() on ' . $self->{ 'input' } . ": $OS_ERROR\n" ) unless opendir( $dir, '.' );
    my @names = readdir $dir;
    closedir $dir;

    my @data_files = ();
    for my $file_name ( @names ) {
        next unless -f $file_name;
        next unless $file_name =~ m{\Adata\.[A-Za-z0-9_]+\.[A-Za-z0-9_]+\.\d+\.dump\z};
        my $file_size = ( stat( $file_name ) )[ 7 ];
        push @data_files,
            {
            'file_path' => $file_name,
            'file_size' => $file_size,
            };
    }

    $self->{ 'files' } = [ map { $_->{ 'file_path' } } sort { $b->{ 'file_size' } <=> $a->{ 'file_size' } } @data_files ];
    return;
}

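# Restores everything except indexes, constraints, foreign keys, triggers and
# ACLs, then replays the dump's sequences.sql through psql.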
sub load_base_schema {
    my $self = shift;

    my @items = ();

    for my $i ( @{ $self->{ 'list' } } ) {
        next if $i->{ 'type' } eq 'INDEX';
        next if $i->{ 'type' } eq 'FK';
        next if $i->{ 'type' } eq 'CONSTRAINT';
        next if $i->{ 'type' } eq 'TRIGGER';
        next if $i->{ 'type' } eq 'ACL';
        push @items, $i;
    }

    $self->pg_restore( \@items, 1 );

    $self->psql( "\\i sequences.sql" );

    return;
}

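# Final schema phase: restores the TRIGGER and ACL entries that were skipped
# during load_base_schema().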
sub finish_schema {
    my $self = shift;

    $self->pg_restore(
        [ grep { $_->{'type'} =~ m{\A (?: TRIGGER | ACL ) \z }xms } @{ $self->{'list'} } ],
        1
    );

    return;
}

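# Runs pg_restore -l on schema.dump and parses the listing into a list of
# { type, line } entries, skipping the DATABASE entry.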
sub get_list_from_schema {
    my $self = shift;
    my $list = $self->run_command(
        'pg_restore',
        '-l',
        'schema.dump'
    );

    my @input = split /\r?\n/, $list;

    my @objects = ();

    for my $line ( @input ) {
        next unless $line =~ /^\d+;/;
        croak "Strange line in pg_restore -l output: [$line]\n" unless $line =~ m{
            \A
            \d+
            ;
            \s+
            \d+
            \s+
            \d+
            \s+
            ([A-Z]+)
            \s+
        }xms;
        my $type = $1;
        next if 'DATABASE' eq $type;
        push @objects,
            {
            'type' => $type,
            'line' => $line,
            };
    }

    $self->{ 'list' } = \@objects;
    return;
}

sub new {
    my $class = shift;
    my $self  = {};
    bless $self, $class;
    return $self;
}

sub run {
    my $self = shift;

    $self->read_options();
    $self->show_running_details();
    $self->confirm_work();
    $self->load_dump();
    return;
}

sub confirm_work {
    my $self = shift;
    printf "\n\nAre you sure you want to continue?\n";
    printf "Enter YES to continue: ";
    my $input = <STDIN>;
    exit unless $input =~ m{\AYES\r?\n?\z};
    return;
}

sub show_running_details {
    my $self = shift;

    my $db = $self->psql( 'SELECT current_user, current_database()' );
    $self->{ 'database' } = $db->[ 0 ]->[ 1 ];

    my $largest_tables = $self->psql(
        q{
            SELECT
                *
            FROM
                (
                    SELECT
                        rpad(oid::regclass::text, 32) || ' (' || pg_size_pretty(pg_relation_size(oid)) || ')'
                    FROM
                        pg_class
                    WHERE
                        relkind = 'r'
                        and relname !~ '^pg_'
                    order by
                        pg_relation_size(oid) desc
                    limit 5
                ) x
            order by
                1
        }
    );

    my @tables = map { $_->[ 0 ] } @{ $largest_tables };

    printf "Config:\n";
    for my $key ( sort keys %{ $self } ) {
        printf "%-10s : %s\n", $key, $self->{ $key };
    }

    printf "\nDatabase details:\n";
    printf "User          : %s\n", $db->[ 0 ]->[ 0 ];
    printf "Database      : %s\n", $db->[ 0 ]->[ 1 ];
    printf "Sample tables : %s\n", shift @tables;
    printf "              - %s\n", $_ for @tables;
    return;
}

sub read_options {
    my $self = shift;

    my $opts = {
        'psql'       => 'psql',
        'pg_restore' => 'pg_restore',
        'input'      => '.',
        'jobs'       => 1,
        'fkey-jobs'  => 1,
    };

    my $is_ok = GetOptions( $opts, qw( help|? input|o=s compressor|c=s jobs|j=i fkey-jobs|f=i psql|p=s pg_restore|r=s ) );
    pod2usage( '-verbose' => 1, ) unless $is_ok;
    pod2usage( '-verbose' => 99, '-sections' => [ qw( DESCRIPTION SYNOPSIS OPTIONS ) ] ) if $opts->{ 'help' };

    pod2usage( '-message' => 'Input directory has to be given.' ) if !$opts->{ 'input' };
    pod2usage( '-message' => 'Input directory does not exist.' )  if !-e $opts->{ 'input' };
    pod2usage( '-message' => 'Input is not a directory.' )        if !-d $opts->{ 'input' };
    pod2usage( '-message' => 'Input directory is not writable.' ) if !-w $opts->{ 'input' };

    pod2usage( '-message' => 'Number of jobs cannot be empty.' ) if '' eq $opts->{ 'jobs' };
    $opts->{ 'jobs' } = int( $opts->{ 'jobs' } );
    pod2usage( '-message' => 'Number of jobs cannot be less than 1.' )   if 1 > $opts->{ 'jobs' };
    pod2usage( '-message' => 'Number of jobs cannot be more than 100.' ) if 100 < $opts->{ 'jobs' };

    pod2usage( '-message' => 'Number of fkey-jobs cannot be empty.' ) if '' eq $opts->{ 'fkey-jobs' };
    $opts->{ 'fkey-jobs' } = int( $opts->{ 'fkey-jobs' } );
    pod2usage( '-message' => 'Number of fkey-jobs cannot be less than 1.' )   if 1 > $opts->{ 'fkey-jobs' };
    pod2usage( '-message' => 'Number of fkey-jobs cannot be more than 100.' ) if 100 < $opts->{ 'fkey-jobs' };

    $opts->{ 'input' } = abs_path( $opts->{ 'input' } );
    @{ $self }{ keys %{ $opts } } = values %{ $opts };
    chdir $self->{ 'input' };
    return;
}

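# Runs pg_restore limited (via -L) to the given listing entries. With $to_db
# true it restores directly into the target database; otherwise it returns the
# generated SQL as text.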
sub pg_restore {
    my $self = shift;
    my ( $lines, $to_db ) = @_;

    my ( $list_fh, $list_filename ) = tempfile( 'list.XXXXXX', 'DIR' => $self->{ 'tmpdir' }, );
    print $list_fh $_->{ 'line' } . "\n" for @{ $lines };
    close $list_fh;

    my @cmd = ( 'pg_restore', '-L', $list_filename );
    push @cmd, ( '-d', $self->{'database'} ) if $to_db;
    push @cmd, 'schema.dump';

    my $response = $self->run_command( @cmd );

    unlink $list_filename;

    return $response;
}

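# Writes the query (or any psql script) to a temporary file - or to the
# optionally supplied file name - runs it through psql -qAtX, and returns an
# arrayref of rows, each row an arrayref of tab-separated column values.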
sub psql {
    my $self       = shift;
    my $query      = shift;
    my $query_file = shift;

    my $remove_query_file = 1;

    my $query_fh;
    if ( defined $query_file ) {
        $remove_query_file = 0;
        open $query_fh, '>', $query_file or croak( "Cannot write to $query_file: $OS_ERROR\n" );
    }
    else {
        ( $query_fh, $query_file ) = tempfile( 'fast.dump.XXXXXXXX', 'TMPDIR' => 1, );
    }

    print $query_fh $query;
    close $query_fh;
    my $output = $self->run_command( qw( psql -qAtX -F ), "\t", '-f', $query_file );
    unlink $query_file if $remove_query_file;

    my @rows = grep { '' ne $_ } split /\r?\n/, $output;
    my @data = map { [ split /\t/, $_ ] } @rows;

    return \@data;
}

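# Runs an external command with stdout and stderr captured to temporary files,
# croaks (with the captured stderr) on failure, and returns the captured
# stdout. The command name is replaced with the user-supplied path (--psql,
# --pg_restore) when one was given.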
sub run_command {
    my $self = shift;
    my ( @cmd ) = @_;

    # Use paths provided by user as command line options
    $cmd[ 0 ] = $self->{ $cmd[ 0 ] } if $self->{ $cmd[ 0 ] };

    my $real_command = join( ' ', map { quotemeta } @cmd );

    my ( $stdout_fh, $stdout_filename ) = tempfile( 'fast.dump.XXXXXXXX', 'DIR' => $self->{ 'tmpdir' }, );
    my ( $stderr_fh, $stderr_filename ) = tempfile( 'fast.dump.XXXXXXXX', 'DIR' => $self->{ 'tmpdir' }, );

    $real_command .= sprintf ' 2>%s >%s', quotemeta $stderr_filename, quotemeta $stdout_filename;

    system $real_command;
    local $/ = undef;
    my $stdout = <$stdout_fh>;
    my $stderr = <$stderr_fh>;

    close $stdout_fh;
    close $stderr_fh;

    unlink( $stdout_filename, $stderr_filename );

    my $error_code;
    if ( $CHILD_ERROR == -1 ) {
        $error_code = $OS_ERROR;
    }
    elsif ( $CHILD_ERROR & 127 ) {
        $error_code = sprintf "child died with signal %d, %s coredump\n", ( $CHILD_ERROR & 127 ), ( $CHILD_ERROR & 128 ) ? 'with' : 'without';
    }
    else {
        $error_code = $CHILD_ERROR >> 8;
    }

    croak( "Couldn't run $real_command : " . $stderr ) if $error_code;

    return $stdout;
}

=head1 NAME

fast.restore - Program to restore dumps made by fast.dump. And do it FAST.

=head1 SYNOPSIS

fast.restore [--input=directory/] [--compressor=/usr/bin/gzip] [--jobs=n] [--fkey-jobs=n] [--psql=/usr/bin/psql] [--pg_restore=/usr/bin/pg_restore] [--help]

=head1 OPTIONS

=over

=item --input - Directory where the dump files are. Defaults to the current directory.

=item --compressor - path to the compressor that should be used to decompress
the data. Default is empty, which means no decompression is done. This should
be set to the same compression program that was used when running fast.dump.

=item --jobs - how many concurrent processes to run when restoring data to
tables and creating indexes. Defaults to 1.

=item --fkey-jobs - how many concurrent processes to run when creating foreign keys.
Defaults to 1.

=item --psql - path to the psql program. Defaults to "psql", which will be
found using the $PATH environment variable.

=item --pg_restore - path to the pg_restore program. Defaults to "pg_restore",
which will be found using the $PATH environment variable.

=item --help - shows information about usage of the program.

=back

All options can be given in abbreviated form, using a single dash character
and the first letter of the option, like:

    fast.restore -i /tmp -c bzip2 -j 16

Database connection details should be given using PG* environment variables.
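
For example, assuming the dump was made with gzip compression and the target
database already exists (the host, user, database name, and directory below
are purely illustrative):

    PGHOST=db1 PGUSER=postgres PGDATABASE=mydb \
        fast.restore -i /var/backups/mydb -c gzip -j 8 -f 4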

=head1 DESCRIPTION

fast.restore is the counterpart to fast.dump.

It is used to load dumps made by fast.dump in a parallel way, so that the
restore time is as short as possible.

It cannot be used with normal (pg_dump-made) dumps.