root/trunk/tools/fast.dump.and.restore/fast.dump

Revision 254, 21.2 kB (checked in by depesz)

Make fast.dump also log information about how big the indexes are on the source system. This can later be used by fast.restore to order index creation properly.

#!/usr/bin/env perl
package main;
use strict;
use warnings;
my $program = Omni::Program::Pg::FastDump->new();
$program->run();
exit;

package Omni::Program::Pg::FastDump;
use strict;
use warnings;
use Carp qw( croak carp );
use English qw( -no_match_vars );
use Getopt::Long qw( :config no_ignore_case );
use Data::Dumper;
use Cwd qw( abs_path );
use Pod::Usage;
use POSIX qw( :sys_wait_h );
use File::Spec;
use File::Temp qw( tempfile );

our %killed_pids = ();

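# SIGCHLD handler: reap every exited child (non-blocking) and record the
# time each one finished in %killed_pids, so the master loop can report
# per-dump runtimes.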
sub REAPER {
    my $child;
    while ( ( $child = waitpid( -1, WNOHANG ) ) > 0 ) {
        $killed_pids{ $child } = time();
    }
    $SIG{ 'CHLD' } = \&REAPER;
    return;
}

sub new {
    my $class = shift;
    my $self  = {};
    bless $self, $class;
    return $self;
}

sub run {
    my $self = shift;

    $self->read_options();
    $self->show_running_details();
    $self->confirm_work();
    $self->make_dump();
    return;
}

sub make_dump {
    my $self = shift;
    $self->dump_schema();
    $self->get_list_of_tables();
    $self->split_tables_into_blobs();
    $self->order_blobs();
    $self->launch_dumpers();
    return;
}

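# Master loop: write index.lst describing all blobs, then keep up to --jobs
# worker processes busy dumping blobs, printing a progress line whenever a
# worker exits (or at most every 60 seconds) until none are running or pending.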
sub launch_dumpers {
    my $self = shift;
    $OUTPUT_AUTOFLUSH = 1;
    $SIG{ 'CHLD' } = \&REAPER;

    my %c = map { ( $_ => 0 ) } qw( total partial full );
    for my $t ( @{ $self->{ 'blobs' } } ) {
        $c{ 'total' }++;
        $c{ $t->{ 'blob_type' } }++;
    }

    open my $fh, ">", File::Spec->catfile( $self->{ 'output' }, 'index.lst' ) or croak( "Cannot create index file: $OS_ERROR\n" );
    printf $fh '%-5s | %-7s | %-32s | %-32s | %-10s | %s%s', '#', qw( type schema table size condition ), "\n";
    for my $i ( @{ $self->{ 'blobs' } } ) {
        printf $fh '%5d | %-7s | %-32s | %-32s | %-10s | %s%s', @{ $i }{ qw( id blob_type schema table size ) }, ( $i->{ 'condition' } || '' ), "\n";
    }

    printf "%d blobs to be processed. %d full and %d partial.\n", @c{ qw( total full partial ) };
    my %running_kids = ();
    while ( 1 ) {
        my @pids = keys %killed_pids;
        for my $killed ( @pids ) {
            my $blob = delete $running_kids{ $killed };
            next unless $blob;
            my $end_time = delete $killed_pids{ $killed };
            printf $fh "%s dump (#%d) of %s.%s finished after %d seconds.\n", $blob->{ 'blob_type' }, $blob->{ 'id' }, $blob->{ 'schema' }, $blob->{ 'table' }, $end_time - $blob->{ 'started' };
        }
        while ( $self->{ 'jobs' } > scalar keys %running_kids ) {
            last if 0 == scalar @{ $self->{ 'blobs' } };
            my $blob = shift @{ $self->{ 'blobs' } };
            my $pid  = fork();
            croak "cannot fork" unless defined $pid;
            if ( $pid == 0 ) {

                # It's the worker process.
                delete $SIG{ 'CHLD' };
                $self->make_single_blob( $blob );
                exit;
            }

            # It's the master.
            $blob->{ 'started' } = time();
            $running_kids{ $pid } = $blob;
        }

        my %what_runs = map { ( $_ => 0 ) } qw( total partial full );
        for my $t ( values %running_kids ) {
            $what_runs{ 'total' }++;
            $what_runs{ $t->{ 'blob_type' } }++;
        }
        my %what_waits = map { ( $_ => 0 ) } qw( total partial full );
        for my $t ( @{ $self->{ 'blobs' } } ) {
            $what_waits{ 'total' }++;
            $what_waits{ $t->{ 'blob_type' } }++;
        }
        printf 'Running: %3d workers (%3d full, %5d partial). Pending: %3d blobs (%3d full, %5d partial).%s',
            $what_runs{ 'total' },
            $what_runs{ 'full' },
            $what_runs{ 'partial' },
            $what_waits{ 'total' }, $what_waits{ 'full' }, $what_waits{ 'partial' }, "     \r",    # trailing spaces clean leftover chars; \r rewinds to beginning of line
            ;
        last if ( 0 == $what_runs{ 'total' } ) && ( 0 == $what_waits{ 'total' } );
        sleep 60;    # sleep gets interrupted when a child exits, and then the loop repeats.
    }
    printf '%sAll done.%s', "\n", "\n";
    return;
}

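# Worker process: build a throwaway SQL script that emits a
# "COPY ... FROM STDIN;" header, the table data (whole table, or only rows
# matching a partial blob's condition), and a terminating "\.", then run it
# through psql, optionally piping the output through the compressor.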
sub make_single_blob {
    my $self = shift;
    my $blob = shift;

    my $file_name = $self->get_file_name( $blob );
    $PROGRAM_NAME .= ' ... ' . $file_name;

    my $output_path = File::Spec->catfile( $self->{ 'output' }, $file_name );
    my $sql_filename = $output_path . '.sql';
    open my $sql_fh, '>', $sql_filename or croak( "Cannot write to $sql_filename: $OS_ERROR\n" );

    if ( $blob->{ 'blob_type' } eq 'partial' ) {
        print $sql_fh "set enable_seqscan = false;\n";
        printf $sql_fh "SELECT 'COPY %s FROM STDIN;';\n", $blob->{ 'full_name' };
        printf $sql_fh 'COPY (SELECT * FROM %s WHERE %s) TO stdout;%s', $blob->{ 'full_name' }, $blob->{ 'condition' }, "\n";
        printf $sql_fh "SELECT E'\\\\.'\n";
        $PROGRAM_NAME = 'Partial dump of ' . $blob->{ 'full_name' };
    }
    else {
        printf $sql_fh "SELECT 'COPY %s FROM STDIN;';\n", $blob->{ 'full_name' };
        printf $sql_fh 'COPY %s TO stdout;%s', $blob->{ 'full_name' }, "\n";
        printf $sql_fh "SELECT E'\\\\.'\n";
        $PROGRAM_NAME = 'Full dump of ' . $blob->{ 'full_name' };
    }
    close $sql_fh;

    my @cmd = ( $self->{ 'psql' }, '-qAtX', '-f', $sql_filename, );

    my $psql_call = join( ' ', map { quotemeta } @cmd );
    if ( $self->{ 'compressor' } ) {
        $psql_call .= ' | ' . quotemeta( $self->{ 'compressor' } ) . ' -c -';
    }

    my ( $stderr_fh, $stderr_filename ) = tempfile( 'fast.dump.XXXXXXXX', 'TMPDIR' => 1, );

    $psql_call .= sprintf ' 2>%s >%s', quotemeta $stderr_filename, quotemeta $output_path;

    system $psql_call;
    local $/ = undef;
    my $stderr = <$stderr_fh>;

    close $stderr_fh;
    unlink( $stderr_filename );

    my $error_code;
    if ( $CHILD_ERROR == -1 ) {
        $error_code = $OS_ERROR;
    }
    elsif ( $CHILD_ERROR & 127 ) {
        $error_code = sprintf "child died with signal %d, %s coredump\n", ( $CHILD_ERROR & 127 ), ( $CHILD_ERROR & 128 ) ? 'with' : 'without';
    }
    else {
        $error_code = $CHILD_ERROR >> 8;
    }

    croak( "\nCouldn't run $psql_call : " . $stderr ) if $error_code;
    return;
}

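# Build a filesystem-safe output name (data.<schema>.<table>.<id>.dump) by
# hex-encoding every character that is not alphanumeric.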
sub get_file_name {
    my $self = shift;
    my $blob = shift;

    my @output_parts = ( 'data', $blob->{ 'schema' }, $blob->{ 'table' }, $blob->{ 'id' }, 'dump' );

    for my $part ( @output_parts ) {
        $part =~ s/([^a-zA-Z0-9])/sprintf "_%02x", ord( $1 )/ges;
    }

    my $output = join '.', @output_parts;

    return $output;
}

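# Sort blobs largest-first (ties broken by table name) and assign sequential
# ids, so the biggest dumps start as early as possible.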
sub order_blobs {
    my $self  = shift;
    my $i     = 0;
    my @blobs = map { $_->{ 'id' } = $i++; $_ } sort { $b->{ 'size' } <=> $a->{ 'size' } || $a->{ 'table' } cmp $b->{ 'table' } } @{ $self->{ 'blobs' } };
    $self->{ 'blobs' } = \@blobs;
    return;
}

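# Split every table larger than --max-size into multiple "partial" blobs,
# using the primary key column's most-common-values / histogram statistics
# from pg_statistic to pick range boundaries; everything else becomes a
# single "full" blob. Tables without a primary key or usable statistics
# stay as full dumps.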
sub split_tables_into_blobs {
    my $self = shift;

    my @to_split = ();
    my @blobs    = ();
    my %oids     = ();

    while ( my ( $schema_name, $tables_hash ) = each %{ $self->{ 'tables' } } ) {
        while ( my ( $table_name, $table_data ) = each %{ $tables_hash } ) {
            if ( $table_data->{ 'size' } <= $self->{ 'max-size' } ) {
                $table_data->{ 'blob_type' } = 'full';
                push @blobs, $table_data;
                next;
            }
            push @to_split, $table_data;
            $oids{ $table_data->{ 'oid' } } = $table_data;
        }
    }
    if ( 0 == scalar @to_split ) {
        $self->{ 'blobs' } = \@blobs;
        return;
    }
    my $oids = join( ',', map { $_->{ 'oid' } } @to_split );

    my $pkey_columns = $self->psql(
"select distinct on (i.indrelid) i.indrelid, a.attnum, a.attname, t.typname from pg_index i join pg_attribute a on i.indexrelid = a.attrelid join pg_type t on a.atttypid = t.oid where indrelid = ANY('{$oids}'::oid[]) and indisprimary order by i.indrelid, a.attnum"
    );
    croak( "pkey_columns is not arrayref? Something is wrong!" ) unless 'ARRAY' eq ref $pkey_columns;
    croak( "pkey_columns is not arrayref of arrayrefs? Something is wrong!" ) unless 'ARRAY' eq ref $pkey_columns->[ 0 ];

    my ( $sql_fh, $sql_filename ) = tempfile( 'fast.dump.XXXXXXXX', 'TMPDIR' => 1, );

    for my $row ( @{ $pkey_columns } ) {
        $oids{ $row->[ 0 ] }->{ 'partition_key' }    = $row->[ 2 ];
        $oids{ $row->[ 0 ] }->{ 'partition_values' } = [];
        my $sql_query = sprintf q{
            SELECT
                s2.starelid,
                quote_literal(s2.vals[i]),
                s2.probability[i]
            FROM
                (
                    SELECT
                        s1.*,
                        generate_series( array_lower( s1.vals, 1 ), array_upper( s1.vals, 1 ) ) as i
                    FROM
                        (
                            SELECT
                                s.starelid,
                                case
                                when s.stakind1 = 2 THEN stavalues1
                                when s.stakind2 = 2 THEN stavalues2
                                when s.stakind3 = 2 THEN stavalues3
                                when s.stakind4 = 2 THEN stavalues4
                                when s.stakind1 = 1 THEN stavalues1
                                when s.stakind2 = 1 THEN stavalues2
                                when s.stakind3 = 1 THEN stavalues3
                                when s.stakind4 = 1 THEN stavalues4
                                ELSE NULL
                                END::TEXT::%s[] as vals,
                                case
                                when 2 in (s.stakind1, s.stakind2, s.stakind3, s.stakind4) THEN NULL::real[]
                                when s.stakind1 = 1 THEN stanumbers1
                                when s.stakind2 = 1 THEN stanumbers2
                                when s.stakind3 = 1 THEN stanumbers3
                                when s.stakind4 = 1 THEN stanumbers4
                                ELSE NULL::real[]
                                END as probability
                            FROM
                                pg_statistic s
                            WHERE
                                s.starelid = %d
                                AND staattnum = %d
                                AND ( 2 in (s.stakind1, s.stakind2, s.stakind3, s.stakind4) OR 1 in (s.stakind1, s.stakind2, s.stakind3, s.stakind4) )
                        ) as s1
                ) as s2
            ORDER BY s2.starelid, s2.vals[i];%s}, @{ $row }[ 3, 0, 1 ], "\n";
        print $sql_fh $sql_query;
    }
    close $sql_fh;

    my $partitions = $self->psql( '\i ' . $sql_filename );
    unlink $sql_filename;

    for my $row ( @{ $partitions } ) {
        push @{ $oids{ $row->[ 0 ] }->{ 'partition_values' } },
            {
            'value'       => $row->[ 1 ],
            'probability' => $row->[ 2 ],
            };
    }

    for my $table ( @to_split ) {
        if (   ( !defined $table->{ 'partition_values' } )
            || ( 0 == scalar @{ $table->{ 'partition_values' } } ) )
        {
            $table->{ 'blob_type' } = 'full';
            push @blobs, $table;
            next;
        }
        for my $i ( 0 .. $#{ $table->{ 'partition_values' } } ) {
            my $blob = {};
            @{ $blob }{ keys %{ $table } } = ( values %{ $table } );
            delete $blob->{ 'partition_key' };
            delete $blob->{ 'partition_values' };
            $blob->{ 'blob_type' } = 'partial';
            if ( $i == 0 ) {
                $blob->{ 'condition' } = sprintf "%s <= %s", $table->{ 'partition_key' }, $table->{ 'partition_values' }->[ $i ]->{ 'value' };
                if ( $table->{ 'partition_values' }->[ $i ]->{ 'probability' } ) {
                    $blob->{ 'size' } *= $table->{ 'partition_values' }->[ $i ]->{ 'probability' };
                }
                else {
                    $blob->{ 'size' } = 0;
                }
            }
            else {
                $blob->{ 'condition' } = sprintf "%s > %s and %s <= %s", $table->{ 'partition_key' }, $table->{ 'partition_values' }->[ $i - 1 ]->{ 'value' }, $table->{ 'partition_key' },
                    $table->{ 'partition_values' }->[ $i ]->{ 'value' };
                if ( $table->{ 'partition_values' }->[ $i ]->{ 'probability' } ) {
                    $blob->{ 'size' } *= $table->{ 'partition_values' }->[ $i ]->{ 'probability' };
                }
                else {
                    $blob->{ 'size' } /= ( scalar( @{ $table->{ 'partition_values' } } ) - 1 );
                }
            }
            push @blobs, $blob;
        }
        $table->{ 'blob_type' } = 'partial';
        $table->{ 'size' }      = 0;
        $table->{ 'condition' } = sprintf "%s > %s", $table->{ 'partition_key' }, $table->{ 'partition_values' }->[ -1 ]->{ 'value' };
        delete $table->{ 'partition_key' };
        delete $table->{ 'partition_values' };
        push @blobs, $table;
    }
    $self->{ 'blobs' } = \@blobs;
    delete $self->{ 'tables' };
    return;
}

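# List tables from the schema dump (via pg_restore -l), then ask the database
# for each table's oid and size (pg_relation_size plus its TOAST table, in
# kilobytes).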
sub get_list_of_tables {
    my $self = shift;

    my $restored = $self->run_command( $self->{ 'pg_restore' }, '-l', File::Spec->catfile( $self->{ 'output' }, 'schema.dump' ) );

    my @lines = split /\r?\n/, $restored;
    my %tables = ();
    for my $line ( @lines ) {
        next unless $line =~ m{\A\d+;\s+\d+\s+\d+\s+TABLE\s+(\S+)\s+(\S+)\s+};
        $tables{ $1 }->{ $2 } = { 'schema' => $1, 'table' => $2, };
    }
    if ( 0 == scalar keys %tables ) {
        print "This dump doesn't contain any tables.\n";
        exit 0;
    }

    my $db_sizes = $self->psql( "
    SELECT
        n.nspname,
        c.relname,
        c.oid::regclass,
        c.oid,
        cast(
            (
                pg_relation_size(c.oid)
                +
                (
                    CASE
                        WHEN tn.oid IS NOT NULL THEN pg_relation_size( tc.oid )
                        ELSE 0
                    END
                )
            ) / 1024 as int8
        )
    FROM
        pg_class c
        join pg_namespace n on c.relnamespace = n.oid
        left outer join pg_class tc on tc.relname = 'pg_toast_' || c.oid
        left outer join pg_namespace tn on tc.relnamespace = tn.oid and tn.nspname = 'pg_toast'
    WHERE
        c.relkind = 'r'
"
    );

    for my $row ( @{ $db_sizes } ) {
        my ( $schema, $table, $full_name, $oid, $size ) = @{ $row };
        next unless exists $tables{ $schema };
        next unless exists $tables{ $schema }->{ $table };
        $tables{ $schema }->{ $table }->{ 'full_name' } = $full_name;
        $tables{ $schema }->{ $table }->{ 'size' }      = $size;
        $tables{ $schema }->{ $table }->{ 'oid' }       = $oid;
    }

    $self->{ 'tables' } = \%tables;
    return;
}

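# Dump schema only to schema.dump (custom format), and write the sizes of
# all non-system indexes to index.sizes, so fast.restore can order index
# creation properly.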
sub dump_schema {
    my $self = shift;
    $self->run_command( $self->{ 'pg_dump' }, '-Fc', '-f', File::Spec->catfile( $self->{ 'output' }, 'schema.dump' ), '-s', '-v' );

    my ( $sql_fh, $sql_filename ) = tempfile( 'fast.dump.XXXXXXXX', 'TMPDIR' => 1, );
    print $sql_fh "\\o " . File::Spec->catfile( $self->{ 'output' }, 'index.sizes' ) . "\n";
    print $sql_fh
        "select n.nspname, c.relname, pg_relation_size(c.oid) from pg_class c join pg_namespace n on c.relnamespace = n.oid where c.relkind = 'i' and n.nspname !~ '^pg_' order by 3 desc;\n";
    close $sql_fh;
    $self->run_command( $self->{ 'psql' }, '-qAtX', '-F', "\t", '-f', $sql_filename );

    return;
}

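# Interactive safety check: require the user to type YES before dumping starts.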
sub confirm_work {
    my $self = shift;
    printf "\n\nAre you sure you want to continue?\n";
    printf "Please remember that any other connections to the database (aside from $PROGRAM_NAME) can cause dump corruption!\n";
    printf "Enter YES to continue: ";
    my $input = <STDIN>;
    exit unless $input =~ m{\AYES\r?\n?\z};
    return;
}

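# Print the effective configuration, connection details (user/database), and
# the five largest tables, so the user can verify the target before confirming.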
sub show_running_details {
    my $self = shift;

    my $db = $self->psql( 'SELECT current_user, current_database()' );

    my $largest_tables = $self->psql(
"SELECT * FROM ( SELECT rpad(oid::regclass::text, 32) || ' (' || pg_size_pretty(pg_relation_size(oid)) || ')' from pg_class where relkind = 'r' and relname !~ '^pg_' order by pg_relation_size(oid) desc limit 5) x order by 1"
    );

    my @tables = map { $_->[ 0 ] } @{ $largest_tables };

    printf "Config:\n";
    for my $key ( sort keys %{ $self } ) {
        printf "%-10s : %s\n", $key, $self->{ $key };
    }

    printf "\nDatabase details:\n";
    printf "User          : %s\n", $db->[ 0 ]->[ 0 ];
    printf "Database      : %s\n", $db->[ 0 ]->[ 1 ];
    printf "Sample tables : %s\n", shift @tables;
    printf "              - %s\n", $_ for @tables;
    return;
}

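# Parse command line options, validate them (output directory, job count,
# max-size), and copy the resulting settings into $self.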
sub read_options {
    my $self = shift;

    my $opts = {
        'psql'       => 'psql',
        'pg_dump'    => 'pg_dump',
        'pg_restore' => 'pg_restore',
        'output'     => '.',
        'jobs'       => 1,
        'max-size'   => 10240,
    };
    my $is_ok = GetOptions( $opts, qw( help|? output|o=s compressor|c=s jobs|j=i max-size|m=i psql|p=s pg_dump|d=s pg_restore|r=s ) );
    pod2usage( '-verbose' => 1, ) unless $is_ok;
    pod2usage( '-verbose' => 99, '-sections' => [ qw( DESCRIPTION SYNOPSIS OPTIONS ) ] ) if $opts->{ 'help' };

    pod2usage( '-message' => 'Output directory has to be given.' ) if !$opts->{ 'output' };
    pod2usage( '-message' => 'Output directory does not exist.' )  if !-e $opts->{ 'output' };
    pod2usage( '-message' => 'Output is not directory.' )          if !-d $opts->{ 'output' };
    pod2usage( '-message' => 'Output directory is not writable.' ) if !-w $opts->{ 'output' };

    pod2usage( '-message' => 'Number of jobs has to be not-empty.' ) if '' eq $opts->{ 'jobs' };
    $opts->{ 'jobs' } = int( $opts->{ 'jobs' } );
    pod2usage( '-message' => 'Number of jobs cannot be less than 1.' )   if 1 > $opts->{ 'jobs' };
    pod2usage( '-message' => 'Number of jobs cannot be more than 100.' ) if 100 < $opts->{ 'jobs' };

    pod2usage( '-message' => 'Max-size has to be not-empty.' ) if '' eq $opts->{ 'max-size' };
    $opts->{ 'max-size' } = int( $opts->{ 'max-size' } );
    pod2usage( '-message' => 'Max-size cannot be less than 1.' ) if 1 > $opts->{ 'max-size' };

    $opts->{ 'output' } = abs_path( $opts->{ 'output' } );
    @{ $self }{ keys %{ $opts } } = values %{ $opts };
    return;
}

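# Run a single query through psql (tuples-only, tab-separated) and return
# the result as an arrayref of row arrayrefs.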
sub psql {
    my $self   = shift;
    my $query  = shift;
    my $output = $self->run_command( $self->{ 'psql' }, '-qAtX', '-F', "\t", '-c', $query, );
    my @rows   = grep { '' ne $_ } split /\r?\n/, $output;
    my @data   = map { [ split /\t/, $_ ] } @rows;
    return \@data;
}

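# Run an external command with stdout/stderr redirected to temp files;
# return captured stdout, croaking with the captured stderr on failure.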
sub run_command {
    my $self = shift;
    my ( @cmd ) = @_;

    my $real_command = join( ' ', map { quotemeta } @cmd );

    my ( $stdout_fh, $stdout_filename ) = tempfile( 'fast.dump.XXXXXXXX', 'TMPDIR' => 1, );
    my ( $stderr_fh, $stderr_filename ) = tempfile( 'fast.dump.XXXXXXXX', 'TMPDIR' => 1, );

    $real_command .= sprintf ' 2>%s >%s', quotemeta $stderr_filename, quotemeta $stdout_filename;

    system $real_command;
    local $/ = undef;
    my $stdout = <$stdout_fh>;
    my $stderr = <$stderr_fh>;

    close $stdout_fh;
    close $stderr_fh;

    unlink( $stdout_filename, $stderr_filename );

    my $error_code;
    if ( $CHILD_ERROR == -1 ) {
        $error_code = $OS_ERROR;
    }
    elsif ( $CHILD_ERROR & 127 ) {
        $error_code = sprintf "child died with signal %d, %s coredump\n", ( $CHILD_ERROR & 127 ), ( $CHILD_ERROR & 128 ) ? 'with' : 'without';
    }
    else {
        $error_code = $CHILD_ERROR >> 8;
    }

    croak( "Couldn't run $real_command : " . $stderr ) if $error_code;

    return $stdout;
}

=head1 NAME

fast.dump - Program to do very fast dumps of a PostgreSQL database

=head1 SYNOPSIS

fast.dump [--output=directory/] [--compressor=/usr/bin/gzip] [--jobs=n] [--max-size=n] [--psql=/usr/bin/psql] [--pg_dump=/usr/bin/pg_dump] [--pg_restore=/usr/bin/pg_restore] [--help]

=head1 OPTIONS

=over

=item --output - Directory where dump files will be written. Defaults to
current directory.

=item --compressor - Path to compressor that should be used to compress
data. Default is empty, which doesn't compress, and you'll usually want
something like gzip.

=item --jobs - How many concurrent worker processes to run when dumping
data from tables. Defaults to 1.

=item --max-size - Minimal size of table (as reported by pg_relation_size(),
in kilobytes) above which fast.dump will try to split it into many separate
blocks. Defaults to 10240 (10MB).

=item --psql - Path to psql program. Defaults to "psql", which will use
$PATH environment variable to find it.

=item --pg_dump - Path to pg_dump program. Defaults to "pg_dump", which will
use $PATH environment variable to find it.

=item --pg_restore - Path to pg_restore program. Defaults to "pg_restore",
which will use $PATH environment variable to find it.

=item --help - Shows information about usage of the program.

=back

All options can be given in abbreviated version, using single dash character
and first letter of option, like:

    fast.dump -o /tmp -c bzip2 -j 16

Database connection details should be given using PG* environment variables.

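For example (the host and database names below are hypothetical):

    PGHOST=db.example.com PGUSER=postgres PGDATABASE=mydb fast.dump -o /var/backups/mydb -c gzip -j 8
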
=head1 DESCRIPTION

fast.dump is used to make very fast database dumps, although they require
special attention.

It works with PostgreSQL databases, and will produce a consistent dump only
if there are no other connections to the database (other than fast.dump
itself).

Generated dumps take the form of a directory with many files inside, and
should be loaded using the fast.restore counterpart tool.