File: //proc/self/root/proc/self/root/scripts.20110531.215904.25158/pkgacct
#!/usr/bin/perl
# cpanel - pkgacct Copyright(c) 2010 cPanel, Inc.
# All rights Reserved.
# copyright@cpanel.net http://cpanel.net
# This code is subject to the cPanel license. Unauthorized copying is prohibited
package Script::Pkgacct;
use strict;
# Require at least perl 5.6; the script relies on features (e.g. our,
# three-arg open elsewhere in the file) introduced by that release line.
require 5.006;
# Bootstrap block, run at compile time before any of the 'use Cpanel::*'
# lines below are resolved:
#   1. Prepend /usr/local/cpanel to @INC so the bundled cPanel modules
#      can be found.
#   2. Honor a site-provided replacement: when the first argument is
#      --allow-override, drop that flag and, if an executable override
#      script exists at /var/cpanel/lib/Whostmgr/Pkgacct/pkgacct,
#      exec() it with the remaining arguments.  exec() never returns on
#      success, so the stock pkgacct below is skipped entirely.
#      Note: '-x _' reuses the stat buffer populated by the preceding
#      '-e' test, avoiding a second stat() call.
BEGIN {
unshift @INC, '/usr/local/cpanel';
if ( defined $ARGV[0] && $ARGV[0] eq '--allow-override' ) {
shift(@ARGV);
if ( -e '/var/cpanel/lib/Whostmgr/Pkgacct/pkgacct' && -x _ ) {
exec( '/var/cpanel/lib/Whostmgr/Pkgacct/pkgacct', @ARGV );
}
}
}
use strict;
use bytes; #required for mysqldumpdb
use Cwd ();
use Fcntl ();
use Cpanel::ConfigFiles ();
use Cpanel::Tar ();
use Cpanel::MD5 ();
use Cpanel::Logger ();
use Cpanel::SafeSync ();
use Cpanel::Config::LoadCpUserFile ();
use Cpanel::Config::LoadCpConf ();
use Cpanel::DiskLib ();
use Cpanel::PwCache ();
use Cpanel::AccessIds::SetUids ();
use Cpanel::Encoder::URI ();
use Cpanel::HttpUtils::SSL ();
use Cpanel::Config::userdata::ApacheConf ();
use Cpanel::Config::userdata::Load ();
use Cpanel::Filesys ();
use Cpanel::Limits ();
use Cpanel::SimpleSync::CORE ();
use Cpanel::Hulk::Constants (); #issafe #nomunge
use Cpanel::Locale (); #issafe #nomunge
use Cpanel::Locale::Utils::3rdparty (); #issafe #nomunge
use Cpanel::Locale::Utils::Display (); #issafe #nomunge
use Cpanel::DomainIp ();
use Cpanel::SSLPath ();
use Cpanel::DnsUtils::AskDnsAdmin ();
use Cpanel::SafeRun::Simple ();
use Cpanel::SafeRun::Errors ();
use Cpanel::StringFunc::Match ();
use Cpanel::MysqlUtils ();
use Cpanel::PostgresUtils ();
use Cpanel::PwDiskCache ();
use Cpanel::Config::userdata ();
use Cpanel::BWFiles ();
use Cpanel::UserFiles ();
use Cpanel::Config::Backup ();
use Cpanel::AdminBin ();
# Best-effort load of Lchown (lets us chown symlinks without following
# them).  The require is wrapped in eval so a missing module is
# tolerated, and $SIG{__DIE__} is localized so any global die handler
# does not fire on the failed require.
eval {
    local $SIG{__DIE__};
    require Lchown;
} if !exists $INC{'Lchown.pm'};

# Record whether Lchown is usable for the rest of the script.
my $has_lchown = exists $INC{'Lchown.pm'} ? 1 : 0;

# Digest::MD5 is likewise loaded best-effort; absence is tolerated.
eval {
    local $SIG{__DIE__};
    require Digest::MD5;
} if !exists $INC{'Digest/MD5.pm'};

# Packaging sanity probe: confirm the script compiles/starts, then exit.
# The 'defined' guard avoids an "Use of uninitialized value" warning
# when the script is invoked with no arguments at all.
if ( defined $ARGV[0] && $ARGV[0] eq "--bincheck" ) {
    print "BinCheck ok\n";
    exit();
}

# File-scoped; assigned from $ENV{'INCBACKUP'} inside script().
my $is_incremental_backup;

# NOTE(review): presumably disables content-level comparison in
# Cpanel::SimpleSync::CORE::syncfile so syncs rely on cheaper checks --
# confirm against that module.
$Cpanel::SimpleSync::CORE::sync_contents_check = 0;

## Constant (for split files) moved to package scope variable; redefined in test script
our $splitfile_partsize = 100_000_000;

# Act as a command-line script unless this file was loaded (require/do)
# by something else, e.g. the test suite.
__PACKAGE__->script(@ARGV) unless caller();
sub script {
my ( $class, @argv ) = @_;
my %SECURE_PWCACHE;
tie %SECURE_PWCACHE, 'Cpanel::PwDiskCache', 'load_callback' => \&Cpanel::PwCache::load, 'validate_callback' => \&Cpanel::PwCache::validate;
Cpanel::PwCache::init( \%SECURE_PWCACHE );
my $tarcfg = Cpanel::Tar::load_tarcfg();
my ( $status, $message ) = Cpanel::Tar::checkperm();
exit 1 if !$status; # No need to give message. Handled in checkperm routine
my $loaded_signals;
#this is just a fallback in case get_signal_num_from_name fails;
my %signo = ( 'USR1' => 10, 'USR2' => 12 );
my $ppid = getppid();
my $httpuser = 'nobody';
my $httpgid = ( getgrnam($httpuser) )[2];
#recursive, copy symlinks as symlinks, preserve permissions,
#preserve times, preserve devices
$| = 1;
delete $ENV{'LD_LIBRARY_PATH'};
$is_incremental_backup = $ENV{'INCBACKUP'};
my $now = time();
my ( $user, $tarroot, $OPTS, $new_mysql_version ) = process_args(@argv);
my @pwent;
if ( $> == 0 ) {
push @pwent, Cpanel::PwCache::getpwnam($user);
}
else {
push @pwent, split( /:/, Cpanel::AdminBin::adminrun( 'security', 'READPASSWD', $user ) );
}
my ( $pass, $uid, $gid, $syshomedir, $shell, $passwd_mtime, $shadow_mtime ) = @pwent[ 1, 2, 3, 7, 8, 11, 12 ];
if ( !$uid ) { die "Unable to get user id for user $user"; }
my $cpuser_ref = Cpanel::Config::LoadCpUserFile::loadcpuserfile($user);
if ( !scalar keys %{$cpuser_ref} ) {
die "Unable to load cPanel user data.\n";
}
my $cpconf = Cpanel::Config::LoadCpConf::loadcpconf();
my $backupconf = Cpanel::Config::Backup::load();
my $system = $^O;
my $usedomainlookup = 0;
if ( $> == 0 ) {
$ENV{'USER'} = 'root';
$ENV{'HOME'} = '/root';
}
else {
require Cpanel::DomainLookup;
$usedomainlookup = 1;
}
if ( $user eq "root" ) {
print "You cannot copy the root user.\n";
exit;
}
if ( substr( $tarroot, 0, 1 ) eq "~" ) {
my $tuser = substr( $tarroot, 1 );
$tarroot = ( Cpanel::PwCache::getpwnam($tuser) )[7];
}
my $isuserbackup = 0;
my $isbackup = 0;
my $prefix = '';
if ( $OPTS->{'backup'} ) {
$isbackup = 1;
$prefix = '';
}
elsif ( $OPTS->{'userbackup'} ) {
$isuserbackup = 1;
$isbackup = 1;
my ( $sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst ) = localtime(time);
$mon++;
$year += 1900;
$sec = sprintf( "%02d", $sec );
$min = sprintf( "%02d", $min );
$hour = sprintf( "%02d", $hour );
$prefix = "backup-${mon}.${mday}.${year}_${hour}-${min}-${sec}_";
}
else {
$prefix = 'cpmove-';
}
my $localzonesonly = ( defined $backupconf->{'LOCALZONESONLY'} && $backupconf->{'LOCALZONESONLY'} eq 'yes' ) ? 1 : 0;
my $compressflag = 'z';
my $archiveext = 'tar.gz';
my $compress = 1;
if ( $OPTS->{'nocompress'} ) {
$compress = 0;
$compressflag = '';
$archiveext = 'tar';
}
my $pkg_version = 9.0;
print "pkgacct started.\n";
print "pkgacct version $pkg_version - user : $user - archive version: $OPTS->{'archive_version'} - running with uid $<\n";
my $split = 0;
if ( $OPTS->{'split'} ) {
print "We will be splitting the archive!!\n";
$split = 1;
}
$prefix =~ s/\s//g;
$prefix =~ s/\n//g;
if ( $tarroot eq "" || !-d "$tarroot" ) {
if ( $OPTS->{'backup'} ) {
print "Bailing out.. you must set a valid destination for backups\n";
exit;
}
$tarroot = Cpanel::Filesys::getmntpoint();
}
if ( $> == 0 && ( !($isbackup) ) ) {
my $output = Cpanel::SafeRun::Errors::saferunallerrors( "rdate", "-s", "rdate.cpanel.net" );
if ( $output =~ /Could not read data/ ) {
print "Rdate bug detected. Please update to rdate-1.1\n";
}
}
$0 = "pkgacct - ${user} - av: $OPTS->{'archive_version'}";
if ( $> != 0 && !$ENV{'REMOTE_PASSWORD'} ) {
print "*** The REMOTE_PASSWORD variable is missing from the enviroment and we are not running with root access. mySQL backups will fail. ***\n";
}
my $homedir = $syshomedir;
my $abshomedir = $homedir; #reversed
if ( -l $homedir ) {
$homedir = readlink($homedir);
}
my $dns = $cpuser_ref->{'DOMAIN'};
my $suspended = ( $cpuser_ref->{'SUSPENDED'} ? 1 : 0 );
my @DNS = ($dns);
push @DNS, @{ $cpuser_ref->{'DOMAINS'} } if ref $cpuser_ref->{'DOMAINS'} && @{ $cpuser_ref->{'DOMAINS'} };
my @WWWDNS = map { 'www.' . $_ } @DNS;
my $cpargs = ( $system =~ /freebsd/i ? '-Rpf' : '-a' );
my $dns_list = join( '|', map { quotemeta($_) } @DNS );
my $sql_dnslist = join( '|', map { quotemeta($_) } grep( !/\*/, @DNS ) );
my $dns_list_with_www = join( '|', ( $dns_list, map { quotemeta($_) } @WWWDNS ) );
if ( !$dns ) {
print "Unable to find domain name for $user\n";
exit;
}
my $ip;
if ($usedomainlookup) {
require Cpanel::UserDomainIp;
$ip = Cpanel::UserDomainIp::getdomainip($dns);
}
else {
$ip = Cpanel::DomainIp::getdomainip($dns);
}
my $ssldomain = Cpanel::HttpUtils::SSL::getsslhostbyip($ip);
if ( !grep { /\A$ssldomain\z/ } ( @WWWDNS, @DNS ) ) {
$ssldomain = '';
}
if ( !$prefix && ( $tarroot eq '/' || $tarroot eq '/home' || $tarroot eq Cpanel::Filesys::getmntpoint() ) ) {
print "Bailing out .. no prefix set and tarroot is / or /home\n";
exit;
}
my $work_dir = ( $is_incremental_backup && ( $user eq 'files' || $user eq 'dirs' ) ) ? "${tarroot}/${prefix}user_${user}" : "${tarroot}/${prefix}${user}";
if ($prefix) {
if ( -d $work_dir && !-l $work_dir ) {
if ( !$is_incremental_backup ) {
system( "rm", "-rf", $work_dir );
}
}
if ( -d "${work_dir}-split"
&& !-l "${work_dir}-split" ) {
if ( !$is_incremental_backup ) {
system( "rm", "-rf", "${work_dir}-split" );
}
}
if ( -f "${work_dir}.${archiveext}"
&& !-l "${work_dir}.${archiveext}" ) {
system( "rm", "-rf", "${work_dir}.${archiveext}" );
}
}
if ($isuserbackup) {
my $now = time();
if ( my $xpid = fork() ) {
sigsafe_blocking_waitpid($xpid);
}
else {
Cpanel::AccessIds::SetUids::setuids($user);
open( TMPF, ">", "$homedir/$prefix$user" );
print TMPF "s ${now}\n";
open( TMPF, ">", "$homedir/$prefix$user.$archiveext" );
print TMPF "s ${now}\n";
close(TMPF);
exit();
}
}
if ( !$is_incremental_backup && !$split ) {
open( CPM, ">", "$work_dir.$archiveext" );
close(CPM);
chmod( 0600, "$work_dir.$archiveext" );
}
elsif ($is_incremental_backup) { #add new dirs as needed
build_pkgtree($work_dir);
}
if ( !-e $work_dir ) {
build_pkgtree($work_dir);
}
else {
if ( !$is_incremental_backup ) {
my $part = 0;
while ( $part != 1024 ) {
if ( !-d "$work_dir.$part" ) {
system( 'mv', $work_dir, "$work_dir.$part" );
build_pkgtree($work_dir);
last;
}
$part++;
}
}
}
if ( !-e $work_dir || !-w _ ) {
print "...failed to create the working dir: $work_dir. You can specify an alternate directory like /tmp by running [$0 $user /tmp]\n";
exit(1);
}
# Write version of pkgacct - we cannot cache this -- we have to write it every time
# as we have no way of knowing if the file is up to date
# we cannot implement an mtime check
if ( open( my $ver_h, '>', "$work_dir/version" ) ) {
print {$ver_h} "pkgacct version: $pkg_version\n";
print {$ver_h} "archive version: $OPTS->{'archive_version'}\n";
close($ver_h);
}
my $homedir_mtime = ( lstat($homedir) )[9];
# "$work_dir/homedir_paths" is to be deprecated in favor of "$work_dir/meta/homedir_paths"
foreach my $file ( "$work_dir/homedir_paths", "$work_dir/meta/homedir_paths" ) {
if ($is_incremental_backup) {
my $file_change_time = ( lstat($file) )[9];
next
if (
$file_change_time && #file exists
$homedir_mtime < $now && #timewarp safety
$file_change_time > $homedir_mtime && #check to make sure the symlink or dir did not get changed on us
$passwd_mtime < $now && #timewarp safety
$file_change_time > $passwd_mtime #check to make sure their homedir did not change in the passwd file
);
}
if ( sysopen( my $home_fh, $file, &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 ) ) {
print {$home_fh} $homedir . "\n";
if ( $abshomedir ne $homedir ) { print {$home_fh} $abshomedir . "\n"; }
close($home_fh);
}
}
my $needs_mailserver = 1;
if ($is_incremental_backup) {
my $mailserver_mtime = ( lstat("$work_dir/meta/mailserver") )[9];
my $cpanel_config_mtime = ( lstat("/var/cpanel/cpanel.config") )[9];
$needs_mailserver = 0
if (
$mailserver_mtime && #file exists
$cpanel_config_mtime < $now && #timewarp safety
$mailserver_mtime < $now && #timewarp safety
$mailserver_mtime > $cpanel_config_mtime #check to make sure the file is newer then the cpanel config
);
}
if ( $needs_mailserver && open( my $mailserver_fh, '>', "$work_dir/meta/mailserver" ) ) {
print {$mailserver_fh} $cpconf->{'mailserver'} . "\n";
close($mailserver_fh);
}
my $ssldir = Cpanel::SSLPath::getsslroot();
print "Copying Reseller Config...";
if ( $> == 0 ) {
Cpanel::Limits::backup_reseller_config( $user, "$work_dir/resellerconfig" );
Cpanel::Limits::backup_reseller_limits( $user, "$work_dir/resellerconfig" );
}
print "Done\n";
print "Copying Suspension Info (if needed)...";
Cpanel::SimpleSync::CORE::syncfile( "/var/cpanel/suspended/$user", "$work_dir/suspended" );
Cpanel::SimpleSync::CORE::syncfile( "/var/cpanel/suspended/$user.lock", "$work_dir/suspended" );
Cpanel::SimpleSync::CORE::syncfile( "/var/cpanel/suspendinfo/$user", "$work_dir/suspendinfo" );
print "Done\n";
print "Copying SSL Certificates, CSRS, and Keys...";
if ( $> == 0 ) {
## note: @DNS contains the 'www.' versions at this point
foreach my $domain ( @WWWDNS, @DNS, $ssldomain ) {
if ( -e "${ssldir}/certs/${domain}.crt" ) {
print "..${domain}.crt..";
Cpanel::SimpleSync::CORE::syncfile( "${ssldir}/certs/${domain}.crt", "$work_dir/sslcerts" );
}
if ( -e "${ssldir}/certs/${domain}.cabundle" ) {
print "..${domain}.cabundle..";
Cpanel::SimpleSync::CORE::syncfile( "${ssldir}/certs/${domain}.cabundle", "$work_dir/sslcerts" );
}
if ( -e "${ssldir}/certs/${domain}.csr" ) {
print "..${domain}.csr..";
Cpanel::SimpleSync::CORE::syncfile( "${ssldir}/certs/${domain}.csr", "$work_dir/sslcerts" );
}
if ( -e "${ssldir}/private/${domain}.key" ) {
print "..${domain}.key..";
Cpanel::SimpleSync::CORE::syncfile( "${ssldir}/private/${domain}.key", "$work_dir/sslkeys" );
}
}
}
else {
my @SSLTYPES = ( "csr", "key", "crt", "cabundle" );
foreach my $ssltype (@SSLTYPES) {
my $items = Cpanel::SafeRun::Simple::saferun( '/usr/local/cpanel/bin/sslwrap', 'LIST', $dns, $ssltype );
my (@SSLFILES) = split( /\n/, $items );
foreach my $host (@SSLFILES) {
my $ssldata = Cpanel::SafeRun::Simple::saferun( '/usr/local/cpanel/bin/sslwrap', 'FETCH', $host, $ssltype );
if ( $ssltype eq "csr" ) {
print "..${host}.csr..";
open( CSR, ">", "$work_dir/sslcerts/${host}.csr" );
print CSR $ssldata;
close(CSR);
}
elsif ( $ssltype eq "crt" ) {
print "..${host}.crt..";
open( CRT, ">", "$work_dir/sslcerts/${host}.crt" );
print CRT $ssldata;
close(CRT);
}
elsif ( $ssltype eq "cabundle" ) {
print "..${host}.cabundle..";
open( CAB, ">", "$work_dir/sslcerts/${host}.cabundle" );
print CAB $ssldata;
close(CAB);
}
elsif ( $ssltype eq "key" ) {
print "..${host}.key..";
open( KEY, ">", "$work_dir/sslkeys/${host}.key" );
print KEY $ssldata;
close(KEY);
}
}
}
}
print "Done\n";
print "Copying Domain Keys....";
my $domainkeys_dir = '/var/cpanel/domain_keys';
foreach my $domain ( $dns, @{ $cpuser_ref->{'DOMAINS'} } ) {
if ( -e "$domainkeys_dir/public/$domain" ) {
Cpanel::SimpleSync::CORE::syncfile( "$domainkeys_dir/public/$domain", "$work_dir/domainkeys/public/" );
}
if ( -e "$domainkeys_dir/private/$domain" ) {
Cpanel::SimpleSync::CORE::syncfile( "$domainkeys_dir/private/$domain", "$work_dir/domainkeys/private/" );
}
}
print "Done\n";
print "Copying Counter Data....";
opendir( my $counters, '/var/cpanel/Counters' );
while ( my $file = readdir($counters) ) {
next if ( $file =~ /^\./ );
if ( ( stat( '/var/cpanel/Counters/' . $file ) )[4] == $uid ) {
Cpanel::SimpleSync::CORE::syncfile( "/var/cpanel/Counters/$file", "$work_dir/counters" );
}
}
closedir($counters);
print "Done\n";
print "Copying Bandwidth Data....";
my $bw_dir = Cpanel::BWFiles::default_dir();
## Case 17778: copies the $user-all, -ftp, -http, -imap, -pop3, and -smtp files.
my @bwfiles;
if ( opendir( my $bwd_h, $bw_dir ) ) {
#regexs precompiled and not looped for speed
my $bw_regex = qr/^(?:_wildcard_\.)?(?:\Q$user\E|$dns_list)(?:-|$)/o;
## FIXME: similar to case 9010, we need to consolidate the places where .rrd files are
## defined. Otherwise every time they change, we eventually need to open a few cases
## (upon account termination, transfers, backups/restores maybe). Seems we are missing
## the files denoted by domain name below...
## note: see case 39209
@bwfiles =
map { $bw_dir . '/' . $_ }
grep { $_ =~ $bw_regex } readdir($bwd_h);
closedir $bwd_h;
}
my ( $filename, $orig_filename );
foreach my $rrd (@bwfiles) {
$filename = $rrd;
$filename =~ s{\Q$bw_dir\E/}{};
$orig_filename = $filename;
if ( $rrd =~ /\.rrd$/ ) {
$filename =~ s/\.rrd$/.xml/;
}
else {
print "..$orig_filename..";
Cpanel::SimpleSync::CORE::syncfile( $rrd, "$work_dir/bandwidth" );
next;
}
next if !_file_needs_backup( $rrd, "$work_dir/bandwidth/$filename", $orig_filename );
print "..$orig_filename..";
if ( sysopen my $to, "$work_dir/bandwidth/$filename", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW, 0644 ) {
my $pid;
if ( $pid = fork() ) {
}
else {
open( STDOUT, ">&=" . fileno($to) );
exec( '/usr/local/cpanel/3rdparty/bin/rrdtool', 'dump', $rrd );
exit 1;
}
close $to;
sigsafe_blocking_waitpid($pid);
}
}
print "Done\n";
print "Copying Dns Zones....";
if ( $> == 0 ) {
my %ZONES;
my $encoded_zones = Cpanel::DnsUtils::AskDnsAdmin::askdnsadmin( 'GETZONES', $localzonesonly, join( ',', @DNS ) );
foreach my $zonedata ( split( /\&/, $encoded_zones ) ) {
my ( $name, $value ) = split( /=/, $zonedata );
next if ( !$name );
$name =~ s/^cpdnszone-//g;
$name = Cpanel::Encoder::URI::uri_decode_str($name);
print "...$name...";
if ( length($value) > 10 ) {
sysopen( ZONE, "$work_dir/dnszones/$name.db", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 );
my $temp = Cpanel::Encoder::URI::uri_decode_str($value);
syswrite( ZONE, $temp, length $temp );
close(ZONE);
}
}
}
print "Done\n";
print "Copying Mail files....";
foreach my $domain (@DNS) {
if ( -e "/etc/valiases/${domain}" ) {
Cpanel::SimpleSync::CORE::syncfile( "/etc/valiases/${domain}", "$work_dir/va" );
}
if ( -e "/etc/vdomainaliases/${domain}" ) {
Cpanel::SimpleSync::CORE::syncfile( "/etc/vdomainaliases/${domain}", "$work_dir/vad" );
}
if ( -e "/etc/vfilters/${domain}" ) {
Cpanel::SimpleSync::CORE::syncfile( "/etc/vfilters/${domain}", "$work_dir/vf" );
}
}
print "Done\n";
print "Copying frontpage files....";
{
my $site_dir = '/usr/local/apache/conf/sites';
my $fp_dir = '/usr/local/frontpage';
my $sites_regex = qr/^(?:$dns_list_with_www)\.conf$/o;
if ( -e $site_dir ) {
opendir( my $dir_h, $site_dir );
my @sites = grep { $_ =~ $sites_regex } readdir($dir_h);
closedir($dir_h);
foreach my $file (@sites) {
print "...$file (site)...";
Cpanel::SimpleSync::CORE::syncfile( $site_dir . '/' . $file, "$work_dir/fp/sites" );
}
}
my $fp_regex = qr/^(?:$dns_list_with_www)(?:\:|\.cnf)/o;
opendir( my $dir_h, $fp_dir );
my @sites = grep { $_ =~ $fp_regex } readdir($dir_h);
closedir($dir_h);
foreach my $file (@sites) {
print "...$file (fpcfg)...";
Cpanel::SimpleSync::CORE::syncfile( $site_dir . '/' . $file, "$work_dir/fp/sites" );
}
print "Done\n";
}
print "Copying proftpd file....";
if ( $> == 0 ) {
Cpanel::SimpleSync::CORE::syncfile( "/etc/proftpd/${user}", "$work_dir/proftpdpasswd" );
}
else {
_simple_exec_into_file( "$work_dir/proftpdpasswd", [ '/usr/local/cpanel/bin/ftpwrap', 'DUMP', '0', '0' ] );
# chmod( 0600, "$work_dir/proftpdpasswd" ); _simple_exec_into_file already sets 0600
}
print "Done\n";
if ( $ENV{'pkgacct-logs'} eq "yes" || $ENV{'pkgacct-cpbackup'} eq "" ) {
print "Copying www logs....";
my $log_regex_list = '^(?:' . join( '|', map { quotemeta($_) . '$', quotemeta( 'www.' . $_ ) . '$', quotemeta( 'ftp.' . $_ ) . '$', quotemeta($_) . '-', quotemeta( 'www.' . $_ ) . '-', quotemeta( 'ftp.' . $_ ) . '-' } @DNS ) . ')';
my $log_regex = qr{$log_regex_list};
my $total_log_size = 0;
my @logfiles;
if ( opendir( my $domlog_dh, '/usr/local/apache/domlogs' ) ) {
@logfiles = map { $total_log_size += ( stat( '/usr/local/apache/domlogs/' . $_ ) )[7]; $_; } grep { $_ =~ $log_regex } readdir($domlog_dh);
closedir($domlog_dh);
}
if (@logfiles) { #only fork if we have log files to copy
my $log_file_copy_ref = sub {
## TODO?: should this copy over domlogs in $user dir as well?
## No they are just links -jnk 4.29.10
foreach my $logfile (@logfiles) {
print "...$logfile...";
Cpanel::SimpleSync::CORE::syncfile( "/usr/local/apache/domlogs/$logfile", "$work_dir/logs", 0, 0, 1 );
}
};
print "...log file sizes [$total_log_size byte(s)]...";
if ( $total_log_size < ( 1024 * 1024 * 6 ) ) { #no forking if less then < 6meg
$log_file_copy_ref->();
}
else {
run_dot_event(
sub {
$0 = "pkgacct - ${user} - log copy child";
$log_file_copy_ref->();
}
);
}
}
print "Done\n";
}
{
my ( $userconfig, $userconfig_work ) = ( Cpanel::UserFiles::userconfig_path($user), "$work_dir/userconfig" );
mkdir($userconfig_work) unless -d $userconfig_work;
if ( opendir( my $dh, $userconfig ) ) {
print 'Copy userconfig...';
my @files = map { "$userconfig/$_" } grep { $_ ne '.' && $_ ne '..' } readdir($dh);
close($dh);
foreach my $file (@files) {
Cpanel::SimpleSync::CORE::syncfile( $file, $userconfig_work );
}
print "Done\n";
}
}
print 'Copy userdata...';
{
my @sync_list;
my @userdatafiles;
my $userdata = "/var/cpanel/userdata/$user";
if ( opendir( my $dir_h, $userdata ) ) {
@userdatafiles = grep { !/cache$/ && !/^\.\.?$/ } readdir $dir_h;
close $dir_h;
foreach my $userdatafile (@userdatafiles) {
my $config = Cpanel::Config::userdata::Load::load_userdata( $user, $userdatafile );
foreach my $key (qw/custom_vhost_template_ap1 custom_vhost_template_ap2/) {
if ( exists $config->{$key} ) {
push @sync_list, [ $config->{$key}, "$work_dir/userdata" ] if -e $config->{$key},;
Cpanel::SimpleSync::CORE::syncfile( $config->{$key}, "$work_dir/userdata" );
}
}
push @sync_list, [ $userdata . '/' . $userdatafile, "$work_dir/userdata" ] if -e $userdata . '/' . $userdatafile;
}
}
if (@sync_list) { #only fork if we have to
my $user_data_copy_ref = sub {
foreach my $sync_ref (@sync_list) {
Cpanel::SimpleSync::CORE::syncfile( $sync_ref->[0], $sync_ref->[1] );
}
};
if ( $#sync_list > 10 ) { #if we are only copying one or two on
run_dot_event(
sub {
$0 = "pkgacct - ${user} - userdata";
$user_data_copy_ref->();
}
);
}
else {
$user_data_copy_ref->();
}
}
}
print "Done\n";
print 'Copy custom virtualhost templates...';
{
my @sync_list;
my @mkdir_list;
my $main_userdata = Cpanel::Config::userdata::Load::load_userdata( $user, 'main' );
my $base = '/usr/local/apache/conf/userdata/';
foreach my $domain ( $main_userdata->{main_domain}, @{ $main_userdata->{sub_domains} }, keys %{ $main_userdata->{addon_domains} } ) {
foreach my $path ( "$base/ssl/1/$user/$domain/", "$base/ssl/2/$user/$domain/", "$base/std/1/$user/$domain/", "$base/std/2/$user/$domain/" ) {
if ( -e $path ) {
if ( $path =~ m{(s(?:(?:td)|(?:sl)))/([12])} ) {
my $proto = $1;
my $ver = $2;
push @mkdir_list, "$work_dir/httpfiles/$proto/", "$work_dir/httpfiles/$proto/$ver/", "$work_dir/httpfiles/$proto/$ver/$domain/";
if ( opendir( my $dir_fh, $path ) ) {
push @sync_list, map { [ $path . '/' . $_, "$work_dir/httpfiles/$proto/$ver/$domain/$_" ] } grep { !/^\./ } readdir($dir_fh);
closedir($dir_fh);
}
}
}
}
}
if (@sync_list) { #only fork if we have to
run_dot_event(
sub {
$0 = "pkgacct - ${user} - custom virtualhost templates copy child";
foreach my $dir (@mkdir_list) {
mkdir( $dir, 0700 );
}
foreach my $sync_ref (@sync_list) {
Cpanel::SimpleSync::CORE::syncfile( $sync_ref->[0], $sync_ref->[1] );
}
}
);
}
}
print "Done\n";
if ( $ENV{'pkgacct-psql'} eq "yes" || $ENV{'pkgacct-cpbackup'} eq "" ) {
my $postgresuser = Cpanel::PostgresUtils::getpostgresuser();
if ($postgresuser) {
my $postgresadmin = -e '/usr/local/cpanel/bin/postgresadmin.pl' ? '/usr/local/cpanel/bin/postgresadmin.pl' : '/usr/local/cpanel/bin/postgresadmin';
my $data_ref = _run_admin_backupcmd( ( $> == 0 ? ( $postgresadmin, $uid ) : ('/usr/local/cpanel/bin/postgreswrap') ), 'BACKUP' );
my $pg_active = $data_ref->{'PING'};
chomp $pg_active;
if ( $pg_active && $pg_active eq 'PONG' ) {
my $users;
my @DBS = grep { !/^\s*$/ } split( /\n/, $data_ref->{'LISTDBS'} );
print "Grabbing PostgreSQL databases...";
if (@DBS) { #only fork if we have to
run_dot_event(
sub {
foreach my $db (@DBS) {
$db =~ s/\n//g;
_simple_exec_into_file(
"$work_dir/psql/$db.tar",
(
$> == 0
? [ $postgresadmin, $uid, 'PGDUMP', $db, '0' ]
: [ '/usr/local/cpanel/bin/postgreswrap', 'PGDUMP', $db, '0' ]
)
);
if ( !-e "$work_dir/psql/$db.tar" ) {
Cpanel::Logger::warn("Unable to write archive: $!");
}
}
}
);
}
print "Done\n";
print "Grabbing PostgreSQL privileges...";
if ( sysopen( my $fh, "$work_dir/psql_users.sql", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 ) ) {
print {$fh} $data_ref->{'DUMPSQL_USERS'};
close($fh);
}
if ( sysopen( my $fh, "$work_dir/psql_grants.sql", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 ) ) {
print {$fh} $data_ref->{'DUMPSQL_GRANTS'};
close($fh);
}
print "Done\n";
}
}
}
if ( $ENV{'CPUWATCH'} eq "1" ) {
print "Leaving timeout safety mode\n";
#allow our parent to suspend us again
## $loaded_signals and %signo passed in by reference, as they were formerly both global, and assigned
## to in the subroutine...
my $signal = get_signal_num_from_name( 'USR2', \$loaded_signals, \%signo );
kill $signal, $ppid;
}
print "Copying mailman lists and archives....";
my %LISTTARGETS = (
'mm' => _get_matching_files( '/usr/local/cpanel/3rdparty/mailman/lists', "_(?:$dns_list)" . '$' ),
'mms' => _get_matching_files( '/usr/local/cpanel/3rdparty/mailman/suspended.lists', "_(?:$dns_list)" . '$' ),
'mma/pub' => _get_matching_files( '/usr/local/cpanel/3rdparty/mailman/archives/public', "_(?:$dns_list)" . '(?:\.mbox)?$' ),
'mma/priv' => _get_matching_files( '/usr/local/cpanel/3rdparty/mailman/archives/private', "_(?:$dns_list)" . '(?:\.mbox)?$' ),
);
foreach my $target ( keys %LISTTARGETS ) {
my $file_list = $LISTTARGETS{$target};
if ( ref $file_list && @$file_list ) {
foreach my $dir (@$file_list) {
my @path = split( /\/+/, $dir );
my $base_file = pop @path;
mkdir( $work_dir . '/' . $target . '/' . $base_file, 0700 ) if !-e $work_dir . '/' . $target . '/' . $base_file;
Cpanel::SafeSync::safesync(
'user' => 'mailman',
'source' => $dir,
'dest' => $work_dir . '/' . $target . '/' . $base_file,
'chown' => 0,
'isbackup' => ( $isbackup || $isuserbackup ),
'delete' => ( $is_incremental_backup ? 1 : 0 ),
'verbose' => 0
);
}
}
}
print "Done\n";
if ( !$OPTS->{'skiphomedir'} ) {
homedir_block( $work_dir, $gid, $isbackup, $isuserbackup, $homedir, $user, $is_incremental_backup, $tarcfg );
}
else {
notate_homedir_nobodyfiles( $homedir, $work_dir );
}
# Record db map status
if ( -e '/var/cpanel/cpanel.config' ) {
if ( open( my $fh, '>', "$work_dir/meta/dbprefix" ) ) {
if ( !exists $cpconf->{'database_prefix'} || $cpconf->{'database_prefix'} == 1 ) {
print {$fh} 1;
}
else {
print {$fh} 0;
}
close $fh;
}
}
# Copying the DB MAP file
if ( -e "/var/cpanel/databases/${user}.yaml" ) {
Cpanel::SimpleSync::CORE::syncfile( "/var/cpanel/databases/${user}.yaml", "$work_dir/meta/dbmap.yaml" );
}
if ( $ENV{'CPUWATCH'} eq "1" ) {
print "Entering timeout safety mode\n";
#prevent our parent from suspending us to prevent a mysql timeout
my $signal = get_signal_num_from_name( 'USR1', \$loaded_signals, \%signo );
kill $signal, $ppid;
}
#mysql block requires loading Config so we do it after we fork
{
if ( $ENV{'pkgacct-mysql'} eq 'yes' || !$ENV{'pkgacct-cpbackup'} ) {
my @mysqldumps;
my $mysqldump = Cpanel::MysqlUtils::find_mysqldump();
my $mysqldatadir = Cpanel::MysqlUtils::getmysqldir() || '/var/lib/mysql';
print 'Determining mysql dbs...';
my $mysqluser = $user;
$mysqluser =~ s/-//g;
my $users;
my $mysqladmin = -e '/usr/local/cpanel/bin/mysqladmin.pl' ? '/usr/local/cpanel/bin/mysqladmin.pl' : '/usr/local/cpanel/bin/mysqladmin';
system( '/scripts/unsuspendmysqlusers', $user ) if $suspended;
my $data_ref = _run_admin_backupcmd( ( $> == 0 ? ( $mysqladmin, $uid ) : ('/usr/local/cpanel/bin/mysqlwrap') ), 'BACKUP' );
system( '/scripts/suspendmysqlusers', $user ) if $suspended;
my %LASTUPDATETIMES = map { ( split( /=/, $_ ) )[ 0, 1 ] } grep { !/^\s+$/ } split( /\n/, $data_ref->{'LASTUPDATETIMES'} );
print "Saving mysql privs...";
if ( $data_ref->{'DUMPSQL'} && _db_needs_backup( 'mysql', 'mysql', $mysqldatadir, "$work_dir/mysql-timestamps/mysql", ["$work_dir/mysql.sql"], \%LASTUPDATETIMES ) ) {
if ( sysopen( my $fh, "$work_dir/mysql.sql", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 ) ) {
print {$fh} "-- cPanel mysql backup\n" . $data_ref->{'DUMPSQL'};
close($fh);
my $time_stamp_file = "$work_dir/mysql-timestamps/mysql";
if ( open( my $time_stamp_file_fh, '>', $time_stamp_file ) ) {
print {$time_stamp_file_fh} time();
close($time_stamp_file_fh);
}
}
}
print "Done\n";
my @DBS = grep { !/^\s*$/ } split( /\n/, $data_ref->{'LISTDBS'} );
my $mysql = Cpanel::MysqlUtils::find_mysql();
my $old_mysql_version = Cpanel::MysqlUtils::mysqlversion($user);
print "...mysql version: $old_mysql_version...";
my @downgrade_options = downgrade_mysql( $old_mysql_version, $new_mysql_version );
my $mysqldump_ver = Cpanel::MysqlUtils::mysqldump_version();
print "...mysqldump version: $mysqldump_ver...";
if ( $mysqldump_ver >= 5.0 && $old_mysql_version >= 5.0 ) {
push @downgrade_options, '-R';
}
elsif ( $mysqldump_ver < 5.0 && $old_mysql_version >= 5.0 ) {
print "\nLocal mysql tools are version 4.x and remote mysql is 5.x. Unable to backup stored procedures.\n";
}
foreach my $db (@DBS) {
$db =~ s/\n//g;
next if ( !_db_needs_backup( 'mysql', $db, $mysqldatadir, "$work_dir/mysql-timestamps/$db", ["$work_dir/mysql/${db}.sql"], \%LASTUPDATETIMES ) );
if ( $> == 0 || !$ENV{'REMOTE_PASSWORD'} ) {
push @mysqldumps, { 'options' => [ @downgrade_options, '-c', '-Q', '-q' ], 'db' => $db, 'time-stamp-file' => "$work_dir/mysql-timestamps/$db", 'file' => "$work_dir/mysql/${db}.sql" };
}
else {
push @mysqldumps, { 'options' => [ @downgrade_options, '-c', '-Q', '-q', '-u' . $user, '-p' . $ENV{'REMOTE_PASSWORD'} ], 'time-stamp-file' => "$work_dir/mysql-timestamps/$db", 'db' => $db, 'file' => "$work_dir/mysql/${db}.sql" };
}
}
# Horde
my $horde_db = "$work_dir/mysql/horde.sql";
if ( $> == 0 && Cpanel::MysqlUtils::db_exists('horde') && _db_needs_backup( 'mysql', 'horde', $mysqldatadir, "$work_dir/mysql-timestamps/horde", [$horde_db], \%LASTUPDATETIMES ) ) {
my @options = ( @downgrade_options, '-c', '-Q', '-q', '-t', '-w' );
#FIXME: make this one dump call -- NOT POSSIBLE since mysqldump only allows one -w call per call
push @mysqldumps, {
'options' => [ @options, qq{owner_id='$user' or owner_id REGEXP '@(${sql_dnslist})\$'} ],
'time-stamp-file' => "$work_dir/mysql-timestamps/horde",
'db' => 'horde',
'file' => $horde_db,
'table' => 'turba_objects',
'append' => 0, #FIRST ONE SHOULD NOT BE APPEND
};
push @mysqldumps,
{
'options' => [ @options, qq{pref_uid='$user' or pref_uid REGEXP '@(${sql_dnslist})\$'} ],
'db' => 'horde',
'file' => $horde_db,
'table' => 'horde_prefs',
'append' => 1,
};
## case 28546: change from 'event_uid' column to 'calendar_id'
push @mysqldumps,
{
'options' => [ @options, qq{calendar_id='$user' or calendar_id REGEXP '@(${sql_dnslist})\$'} ],
'db' => 'horde',
'file' => $horde_db,
'table' => 'kronolith_events',
'append' => 1,
};
push @mysqldumps,
{
'options' => [ @options, qq{vfb_owner='$user' or vfb_owner REGEXP '@(${sql_dnslist})\$'} ],
'db' => 'horde',
'file' => $horde_db,
'table' => 'kronolith_storage',
'append' => 1,
};
push @mysqldumps,
{
'options' => [ @options, qq{memo_owner='$user' or memo_owner REGEXP '@(${sql_dnslist})\$'} ],
'db' => 'horde',
'file' => $horde_db,
'table' => 'mnemo_memos',
'append' => 1,
};
push @mysqldumps,
{
'options' => [ @options, qq{task_owner='$user' or task_owner REGEXP '@(${sql_dnslist})\$'} ],
'db' => 'horde',
'file' => $horde_db,
'table' => 'nag_tasks',
'append' => 1,
};
}
##############################
# RoundCube
my $now_before_roundcube = time();
my $round_db = "$work_dir/mysql/roundcube.sql";
if ( $> == 0 && exists $cpconf->{'roundcube_db'}
and ( $cpconf->{'roundcube_db'} eq 'sqlite' ) ) {
## pass: roundcube.db is in homedir.tar. The logic reads better if this is blank block.
}
elsif ( $> == 0 && Cpanel::MysqlUtils::db_exists('roundcube') && _db_needs_backup( 'mysql', 'roundcube', $mysqldatadir, "$work_dir/mysql-timestamps/roundcube", [$round_db], \%LASTUPDATETIMES ) ) {
my $ids;
if ( exists $data_ref->{'ROUNDCUBEIDS'} ) {
chomp( $ids = $data_ref->{'ROUNDCUBEIDS'} );
}
else {
# if we change this here, we must change it in mysqladmin as well
my $ids = Cpanel::SafeRun::Simple::saferun( $mysql, 'roundcube', '-B', '-ss', '-e', "SELECT user_id FROM users WHERE username REGEXP '@(${sql_dnslist})\$' or username = '$user'" );
$ids = join ',', split /\n/, $ids if $ids;
}
if ($ids) {
## NOTE: the src server will no longer suppress the 'CREATE TABLE' statements. This accounts for the case where
## the dest server has performed the conversion (which archives and deletes the database). ???Would it be better
## to re-create the database with the shipped 'mysql.initial.sql'?
## this also necessitates a '--force' when reimporting the .sql file
## note: considered adding --no-create-db and --create-options, but these looked to be default behavior
my @options = (
@downgrade_options,
'--skip-add-drop-table',
'--complete-insert',
'--quote-names',
'--quick',
'--where',
);
push @mysqldumps,
{
'options' => [ @options, qq{user_id IN ($ids)} ],
'db' => 'roundcube',
'time-stamp-file' => "$work_dir/mysql-timestamps/roundcube",
'file' => $round_db,
## TODO (case 45191): add 'contactgroupmembers', 'contactgroups'
## TODO (case 45191): consider removing 'messages'
'table' => [ 'users', 'messages', 'identities', 'contacts' ],
'append' => 0
};
}
else {
my $time_stamp_file = "$work_dir/mysql-timestamps/roundcube";
if ( open( my $time_stamp_file_fh, '>', $time_stamp_file ) ) {
print {$time_stamp_file_fh} $now_before_roundcube;
close($time_stamp_file_fh);
if ( open( my $sql_fh, '>>', $round_db ) ) {
print {$sql_fh} "-- Roundcube database place holder file\n";
close($sql_fh);
}
}
}
}
print "...Done\n";
##############################
print 'Storing mysql dbs...';
if (@mysqldumps) { #only fork if we actually have databases to backup
run_dot_event(
sub {
$0 = "pkgacct - ${user} - mysql copy child";
foreach my $dumpref (@mysqldumps) {
mysqldumpdb($dumpref);
}
}
);
}
print "...Done\n";
}
}
print "Copying cpuser file.......";
Cpanel::SimpleSync::CORE::syncfile( "/var/cpanel/users/$user", "$work_dir/cp" );
print "Done\n";
print "Copying crontab file.......";
if ( -r "/var/cron/tabs/${user}" ) {
Cpanel::SimpleSync::CORE::syncfile( "/var/cron/tabs/$user", "$work_dir/cron" );
}
if ( -r "/var/spool/cron/${user}" ) {
Cpanel::SimpleSync::CORE::syncfile( "/var/spool/cron/$user", "$work_dir/cron" );
}
elsif ( -r "/var/spool/fcron/${user}" ) {
Cpanel::SimpleSync::CORE::syncfile( "/var/spool/fcron/$user", "$work_dir/cron" );
}
elsif ( $> != 0 ) {
_simple_exec_into_file( "$work_dir/cron/$user", [ 'crontab', '-l' ] );
}
print "Done\n";
my $quota;
print "Copying quota info.......";
my $quota_file_backup_mtime = $is_incremental_backup ? ( ( stat("$work_dir/quota") )[9] || -1 ) : -1;
if ( $quota_file_backup_mtime <= ( ( stat("/etc/quota.conf") )[9] || 0 ) || $quota_file_backup_mtime >= $now ) {
if ( open( my $quota_fh, '<', '/etc/quota.conf' ) ) {
my $user_regex = qr/^\Q$user\E=/;
while ( readline($quota_fh) ) {
if ( $_ =~ $user_regex ) {
$quota = ( split( /=/, $_ ) )[1];
chomp($quota);
}
}
close($quota_fh);
}
if ( sysopen( my $qout_fh, "$work_dir/quota", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 ) ) {
print {$qout_fh} $quota;
close($qout_fh);
}
}
print "Done\n";
my $domain_data_backup_is_current = 0;
if ($is_incremental_backup) {
my $http_now = time();
my $httpdconf = Cpanel::ConfigFiles::find_httpconf();
my $httpd_conf_mtime = ( stat($httpdconf) )[9];
if ( $httpd_conf_mtime < $http_now ) {
my $newest_domain_file_mtime = 0;
foreach my $domain_file ( "$work_dir/sds", "$work_dir/sds2", "$work_dir/pds", "$work_dir/addons" ) {
if ( ( stat($domain_file) )[9] > $newest_domain_file_mtime ) {
$newest_domain_file_mtime = ( stat(_) )[9];
}
}
if ( $httpd_conf_mtime < $newest_domain_file_mtime ) {
$domain_data_backup_is_current = 1;
}
}
}
if ($domain_data_backup_is_current) {
print "Domain data backup is already current....Done\n";
}
else {
print "Storing Subdomains....\n";
my %SUBS;
if ($usedomainlookup) {
%SUBS = Cpanel::DomainLookup::listsubdomains(); #domainlookup takes no args
}
else {
#yes abshomedir and homedir are reversed here.
%SUBS = Cpanel::Config::userdata::ApacheConf::listsubdomains( $user, @DNS, @WWWDNS );
}
sysopen( SH, "$work_dir/sds", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 );
foreach my $sd ( keys %SUBS ) {
syswrite( SH, "$sd\n", length "$sd\n" );
}
close(SH);
sysopen( SH, "$work_dir/sds2", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 );
foreach my $sd ( keys %SUBS ) {
my $basedir = $SUBS{$sd};
$basedir =~ s/^$homedir\/?//g;
$basedir =~ s/^$syshomedir\/?//g;
my $temp = "$sd=$basedir\n";
syswrite( SH, $temp, length $temp );
}
close(SH);
print "Done\n";
print "Storing Parked Domains....\n";
my %SDS;
if ($usedomainlookup) {
%SDS = Cpanel::DomainLookup::getparked($dns);
}
else {
%SDS = Cpanel::Config::userdata::ApacheConf::getparked($dns);
}
sysopen( SH, "$work_dir/pds", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 );
foreach my $sd ( keys %SDS ) {
my $temp = "$sd\n";
syswrite( SH, $temp, length $temp );
}
close(SH);
print "Done\n";
print "Storing Addon Domains....\n";
my (@PSUBS);
my ( %PN, %FN, $pname, $fname );
foreach ( keys %SUBS ) {
$fname = $_;
s/_/\./g;
$FN{$_} = $fname;
push( @PSUBS, $_ );
}
my %PARKED;
if ($usedomainlookup) {
%PARKED = Cpanel::DomainLookup::getmultiparked(@PSUBS);
}
else {
%PARKED = Cpanel::Config::userdata::ApacheConf::getaddon($user);
}
sysopen( SH, "$work_dir/addons", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 );
foreach my $subdomain ( keys %PARKED ) {
foreach my $parked ( keys %{ $PARKED{$subdomain} } ) {
my $temp = "$parked=$FN{$subdomain}\n";
syswrite( SH, $temp, length $temp );
}
}
close(SH);
print "Done\n";
}
## case 38355: the cpmove/ssldomain file is only needed when moving from an older to a newer server
print "Storing ssl domain......";
sysopen( SH, "$work_dir/ssldomain", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 );
syswrite( SH, $ssldomain, length $ssldomain );
close(SH);
print "Done\n";
#passwd_mtime
print "Copying password.......";
my $shadow_file_backup_mtime = $is_incremental_backup ? ( ( stat("$work_dir/shadow") )[9] || -1 ) : -1;
if ( $shadow_file_backup_mtime <= $shadow_mtime || $shadow_file_backup_mtime >= $now ) {
if ( sysopen( my $shadow_fh, "$work_dir/shadow", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 ) ) {
chmod( 0600, "$work_dir/shadow" );
syswrite( $shadow_fh, $pass, length $pass );
close($shadow_fh);
}
}
print "Done\n";
print "Copying shell.......";
my $shell_file_backup_mtime = $is_incremental_backup ? ( ( stat("$work_dir/shell") )[9] || -1 ) : -1;
if ( $shell_file_backup_mtime <= $passwd_mtime || $shell_file_backup_mtime >= $now ) {
if ( sysopen( my $shell_fh, "$work_dir/shell", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 ) ) {
syswrite( $shell_fh, $shell, length $shell );
close($shell_fh);
}
}
print "Done\n";
if ( $> == 0 ) {
export_non_cpanel_locale( $user, $work_dir, $cpuser_ref );
}
else {
print "Exporting of the user's locale must be done as root.\n";
}
chdir($tarroot);
print "Creating Archive ....";
my $prefix_user = "${prefix}${user}";
if ( !$is_incremental_backup ) {
## e.g. invoked as './scripts/pkgacct $user "" userbackup'
## - or - './scripts/pkgacct $user /tmp backup'
if ($isbackup) {
my $destfile = "$prefix_user.${archiveext}";
my $rv = system( $tarcfg->{'bin'}, "pc${compressflag}f", $destfile, $prefix_user );
if ($rv) {
## case 20142: out of disk space on tar system call
print "\nERROR: tar of backup archive returned error $rv\n";
}
}
else {
## e.g. invoked as './scripts/pkgacct $user "" --split'
if ($split) {
handle_dir_to_splitfiles( $work_dir, $prefix_user, $system, $tarcfg, $compressflag, $archiveext );
}
else {
## e.g. invoked as './scripts/pkgacct $user'
run_dot_event(
sub {
my $destfile = "$prefix_user.${archiveext}";
my $rv = system( $tarcfg->{'bin'}, "pc${compressflag}f", $destfile, $prefix_user );
if ($rv) {
## case 20142: out of disk space on tar system call
print "\nERROR: tar of archive returned error $rv\n";
}
}
);
}
}
if ( -d $work_dir && !-l $work_dir ) {
system( "rm", "-rf", $work_dir );
}
}
print "Done\n";
if ($is_incremental_backup) {
## note: nothing seems to capture this, in the way that the other messages are
## captured by Whostmgr::Remote
print "pkgacct target is: $work_dir\n";
}
elsif ( !$split && !$is_incremental_backup ) {
print "pkgacctfile is: $work_dir.$archiveext\n";
}
if ( !$is_incremental_backup && !$split ) {
my $md5sum = Cpanel::MD5::getmd5sum("$work_dir.$archiveext");
print "md5sum is: $md5sum\n";
my $size = ( stat("$work_dir.$archiveext") )[7];
print "\nsize is: $size\n";
}
if ( $OPTS->{'skiphomedir'} ) {
my $du = qx( du -s $homedir );
my ($homesize_kb) = ( $du =~ m/^(\d+)/ );
## FreeBSD does not have the -b option; performing the calculation manually.
my $homesize = $homesize_kb * 1024;
print "\nhomesize is: $homesize\n";
}
}
sub dotsleep {
    ## Sleeps for a tenth of a second using 4-arg select(), which accepts
    ## fractional timeouts without needing Time::HiRes.
    my $tenth_of_a_second = 0.10;
    select( undef, undef, undef, $tenth_of_a_second );
}
sub downgrade_mysql {
    ## Builds the list of extra mysqldump flags needed so a dump taken from
    ## MySQL $old_server can be imported on the older $new_server.
    ## Returns an empty list when no downgrade is required.
    my ( $old_server, $new_server ) = @_;
    return if !$new_server || $new_server >= $old_server;
    my %downgrade_for = (
        '5.1' => { 'options' => ['--skip-events'] },
        '5.0' => { 'options' => [ '--skip-routines', '--skip-triggers' ] },
        '4.1' => { 'options' => ['--compatible=mysql40'] },
        '4.0' => { 'options' => [] },
    );
    my @flags;
    ## Walk versions from newest to oldest, accumulating flags for every
    ## version at or below the source, stopping at the target version.
    for my $version ( sort { $b <=> $a } keys %downgrade_for ) {
        if ( $old_server >= $version && $new_server != $version ) {
            ## 'munge' is an optional per-version hook that may rewrite the
            ## accumulated flags; no version currently defines one.
            @flags = $downgrade_for{$version}{'munge'}->(@flags)
                if exists $downgrade_for{$version}{'munge'};
            push @flags, @{ $downgrade_for{$version}{'options'} };
        }
        last if $new_server == $version;
    }
    return @flags;
}
## Dumps one database (optionally only some of its tables) into
## $args->{'file'} with mysqldump, retrying once after a repair attempt if
## the first dump produced errors.  Returns the number of bytes written.
##
## $args keys:
##   options         - arrayref of extra mysqldump flags
##   db              - database name
##   table           - optional: table name, or arrayref of table names
##   file            - destination .sql path ("$file.err" captures stderr)
##   time-stamp-file - optional path stamped with the dump *start* time
##   append          - truthy to append to 'file' instead of truncating
sub mysqldumpdb {
my ($args) = @_;
my @options = @{ $args->{'options'} };
my $db = $args->{'db'};
my $table = $args->{'table'};
my $file = $args->{'file'};
my $time_stamp_file = $args->{'time-stamp-file'}; #created before we do the dump in case something changes during the dump
my $file_write_mode = $args->{'append'} ? '>>' : '>';
my $mysqldump = Cpanel::MysqlUtils::find_mysqldump();
my @db = ($db);
## Normalize the 'table' argument into a flat list (scalar or arrayref).
my @tables;
if ($table) {
if ( ref $table ) {
push @tables, @$table;
}
else {
push @tables, $table;
}
}
## "db.table,db.table2" label used for progress output and error logging.
my $table_names = ( @tables ? join( ',', map { $db . '.' . $_ } @tables ) : $db );
print $table_names;
if ( $time_stamp_file && open( my $time_stamp_file_fh, '>', $time_stamp_file ) ) {
print {$time_stamp_file_fh} time();
close($time_stamp_file_fh);
}
my @cmdline = ( $mysqldump, @options, @db, ( @tables ? ( '--tables', @tables ) : () ) );
my $dump_ok = 1;
my ( $begin_point, $end_point ) = _exec_into_file( $file, $file_write_mode, \@cmdline );
$dump_ok = _check_error_file( $table_names, $file . '.err' );
#retry the dump if it fails
if ( !$dump_ok ) {
## Attempt a repair first: directly via mysqlcheck as root, or through
## the mysqlwrap admin binary when running as the user.
if ( $> == 0 ) {
my $mysqlcheck = Cpanel::MysqlUtils::find_mysqlcheck();
system( $mysqlcheck, '--repair', @db, ( @tables ? ( '--tables', @tables ) : () ) );
}
else {
system '/usr/local/cpanel/bin/mysqlwrap', 'REPAIRDB', $db[0];
}
( $begin_point, $end_point ) = _exec_into_file( $file, $file_write_mode, \@cmdline );
$dump_ok = _check_error_file( $table_names, $file . '.err' );
}
## Offsets bracket the dump within the (possibly appended-to) file.
my $bytes_saved = int( $end_point - $begin_point );
if ( !$dump_ok ) {
Cpanel::Logger::warn( join( '.', @db ) . ': mysqldump failed -- database may be corrupt' );
}
print '(' . $bytes_saved . ' bytes) ';
return $bytes_saved;
}
sub _check_error_file {
    ## Inspects (and then removes) a captured stderr file from a dump run.
    ## Each non-empty line is logged prefixed with $label.  Returns 1 when
    ## the file was absent or empty (success), 0 when errors were found.
    my ( $label, $err_path ) = @_;
    my $clean = 1;
    if ( -e $err_path ) {
        if ( -s _ && open( my $err_fh, '<', $err_path ) ) {
            while ( my $line = readline($err_fh) ) {
                Cpanel::Logger::warn( $label . ': ' . $line );
                $clean = 0;
            }
            close($err_fh);
        }
        unlink($err_path);
    }
    return $clean;
}
## Runs @$cmd_ref in a child with stdout directed into $file (opened with
## $file_write_mode, '>' or '>>') and stderr captured to "$file.err",
## blocking until the child exits.  Returns ( $begin_point, $end_point ):
## the file offsets before and after the run, so the caller can compute how
## many bytes the command wrote.  Both are -1 when the opens failed.
sub _exec_into_file {
my ( $file, $file_write_mode, $cmd_ref ) = @_;
my $begin_point = -1;
my $end_point = -1;
## NOTE(review): if only the second open() fails, the first handle is
## dropped unclosed until scope exit; and a failed fork() (undef) falls
## into the else branch, so *this* process would exec the command --
## presumably fork failure is considered unrecoverable here.
if ( open( my $fh, $file_write_mode, $file ) && open( my $err_fh, '>', $file . '.err' ) ) {
$begin_point = tell($fh);
if ( my $pid = fork() ) {
sigsafe_blocking_waitpid($pid);
}
else {
## Child: '>&=' dups the already-open descriptors onto STDOUT/STDERR
## without reopening the paths.
open( STDOUT, '>&=' . fileno($fh) );
open( STDERR, '>&=' . fileno($err_fh) );
exec(@$cmd_ref);
exit 1; #prevent fork bomb on failure to exec()
}
# Seek to the end of the file
seek( $fh, 0, 2 );
$end_point = tell($fh);
close($fh);
}
return ( $begin_point, $end_point );
}
## e.g. invoked as './scripts/pkgacct $user'
## Copies the account's home directory into the work area: as a single
## "$work_dir/homedir.tar" for full backups, or rsync-style into
## "$work_dir/homedir" for incremental backups.  Afterwards it records
## which copied files are owned by 'nobody' in "$work_dir/nobodyfiles".
## Runs inside run_dot_event() so the caller keeps emitting progress dots.
sub homedir_block {
my ( $work_dir, $gid, $isbackup, $isuserbackup, $homedir, $user, $is_incremental_backup, $tarcfg ) = @_;
print "Copying homedir....";
## Normalize the work dir to root:$gid ownership and mode 0750.
lstat($work_dir);
if ( -d _ && !-l _ ) {
my ( $work_dir_uid, $work_dir_gid ) = ( stat(_) )[ 4, 5 ];
if ( $work_dir_uid != 0 || $work_dir_gid != $gid ) {
## $has_lchown is a file-level flag: true when the Lchown module loaded.
if ($has_lchown) {
Lchown::lchown( 0, $gid, $work_dir );
}
else {
chown( 0, $gid, $work_dir );
}
}
chmod( 0750, $work_dir );
}
## Ensure "$work_dir/homedir" exists, is root-owned, and is mode 0700.
lstat("$work_dir/homedir");
if ( -d _ && !-l _ ) {
my ( $work_dir_homedir_uid, $work_dir_homedir_gid ) = ( stat(_) )[ 4, 5 ];
if ( $work_dir_homedir_uid != 0 || $work_dir_homedir_gid != 0 ) {
if ($has_lchown) {
Lchown::lchown( 0, 0, "$work_dir/homedir" );
}
else {
chown( 0, 0, "$work_dir/homedir" );
}
}
}
elsif ( !-e _ ) {
mkdir( "$work_dir/homedir", 0700 );
lstat("$work_dir/homedir");
}
chmod( 0700, "$work_dir/homedir" ) if ( sprintf( '%04o', ( lstat(_) )[2] & 07777 ) ne '0700' );
run_dot_event(
sub {
if ( $isbackup || $isuserbackup ) { Cpanel::SafeSync::build_cpbackup_exclude_conf( $homedir, $user ); }
## $nfl_ref maps copied file paths to the owning user name ('nobody'/'cpanel').
my $nfl_ref = {};
if ( !$is_incremental_backup ) {
## Full backup path: stream a tar of $homedir into homedir.tar.
open( REALSTDOUT, ">&STDOUT" );
my $dest_tar_fname = "$work_dir/homedir.tar";
if ( sysopen( my $dest_tar_fh, $dest_tar_fname, &Fcntl::O_WRONLY | &Fcntl::O_TRUNC | &Fcntl::O_CREAT, 0600 ) ) {
## '-|' forks: parent reads (and here, waits); child execs tar below.
my $tar_reader_child_pid = open( my $tar_fh, '-|' );
if ($tar_reader_child_pid) {
close(REALSTDOUT);
sigsafe_blocking_waitpid($tar_reader_child_pid);
## this is the exit code from the tar child process in the else block
my $exit_code = $? >> 8;
my $out_of_disk_space = 0;
my $permission_problem = 0;
if ($exit_code) {
## Heuristic: a tar failure with <=10 units free on the target
## partition (or a missing/empty tarball) is classified as
## out-of-disk; otherwise as a permission problem.
my $partition_map = {};
my @partitions = ();
my $filesys = Cpanel::DiskLib::get_disk_used_percentage();
foreach my $key ( keys %{$filesys} ) {
$partition_map->{ $filesys->{$key}{'mount'} } = $filesys->{$key}{'available'};
push @partitions, { mount => $filesys->{$key}{'mount'}, key => $key };
}
my $pwd = Cwd::getcwd();
my $mount_point = find_mount( \@partitions, $pwd );
my $available = $partition_map->{$mount_point};
if ( $available <= 10 || ( !-e $dest_tar_fname || -z _ ) ) {
$out_of_disk_space = 1;
}
else {
$permission_problem = 1;
}
}
if ($permission_problem) {
print "\nOne or more files in the home directory were not readable and were not copied. Please review the home directory upon completion of transfer\n\n";
}
if ($out_of_disk_space) {
## case 20142: out of disk space on tar exec call
print "\nERROR: tar of homedir returned error $exit_code\n";
## env variable set when pkgacct is called from context of scripts/cpbackup
## (consider renaming ..._NOTIFY_ON_FAIL)
if ( $ENV{'CPBACKUP_NOTIFY_FAIL'} ) {
## Notification sent from a short-lived child so required modules
## are only loaded on the failure path.
if ( my $pid = fork() ) {
sigsafe_blocking_waitpid($pid);
}
else {
require Cpanel::Notify;
require Cpanel::Hostname;
my $host = Cpanel::Hostname::gethostname();
Cpanel::Notify::notification(
'application' => 'cpbackup',
'status' => 'account backup failure',
'priority' => 2,
# Once per day
'interval' => 60 * 60 * 24,
'subject' => sprintf( "The backup of %s's account encountered errors on $host", $user ),
'message' => sprintf( "The backup of %s's account encountered errors\n" . "Please check /usr/local/cpanel/logs/error_log and %s for more information.", $ENV{'CPBACKUP_LOGFILE'}, $user ),
'msgtype' => ''
);
exit;
}
}
}
}
else {
## note: this is the block that performs the 'tar' of homedir
# we need to do this before we setuid to ensure we are writing to the error log
# so Cannot savedir: Permission denied
# does not get into the pkgacct transfer screen and cause the transfer to abort
if ( $ENV{'CPBACKUP'} ) {
open( STDERR, '>&REALSTDOUT' );
}
else {
open( STDERR, '>>', '/usr/local/cpanel/logs/error_log' );
}
## Drop privileges so tar reads the homedir as the account user.
if ( $> == 0 ) { Cpanel::AccessIds::SetUids::setuids($user); }
chdir($homedir) || exit;
my @tarargs = ( '-c', '-f', '-' );
if ( $isbackup || $isuserbackup ) {
if ( -r $homedir . '/cpbackup-exclude.conf' && -s _ ) {
push @tarargs, '-X', $homedir . '/cpbackup-exclude.conf';
}
if ( -r $Cpanel::SafeSync::global_exclude && -s _ ) {
push @tarargs, '-X', $Cpanel::SafeSync::global_exclude;
}
}
if ($isuserbackup) {
## Skip previously generated backup archives for this user.
push @tarargs, '--exclude', 'backup-[!_]*_[!-]*-[!-]*-[!_]*_' . $user . '*';
}
## Tar's stdout goes straight into homedir.tar via the open descriptor.
open( STDOUT, ">&=" . fileno($dest_tar_fh) );
my $cmdstr = join( ' ', $tarcfg->{'bin'}, @tarargs, '.' );
exec( $tarcfg->{'bin'}, @tarargs, '.' );
die "Failed to execute: $cmdstr: $!";
}
close($dest_tar_fh);
my $parse_ok = 0;
( $parse_ok, $nfl_ref ) = Cpanel::SafeSync::find_uid_files_from_tarball( "$work_dir/homedir.tar", [ 'cpanel', 'nobody' ] );
if ( !$parse_ok ) {
#if we could not parse the tar file -t (--list) output then manually stat each file
$nfl_ref = Cpanel::SafeSync::find_uid_files( $homedir, [ 'cpanel', 'nobody' ] );
}
}
else {
print "...failed to save tarball for homedir to $dest_tar_fname: $!...\n";
}
}
else {
## Incremental path: sync files into $work_dir/homedir, deleting
## files that no longer exist in the source.
$nfl_ref = Cpanel::SafeSync::safesync(
'pkgacct' => 1, #ignore ftp quota files
'user' => $user,
'gidlist' => [ 'cpanel', 'nobody' ],
'source' => $homedir,
'dest' => "$work_dir/homedir",
'chown' => 0,
'isbackup' => ( $isbackup || $isuserbackup ),
'delete' => ( $is_incremental_backup ? 1 : 0 ),
'verbose' => 0
);
}
chmod( 0700, "$work_dir/homedir" ) if ( sprintf( '%04o', ( stat("$work_dir/homedir") )[2] & 07777 ) ne '0700' );
## TODO: note similar (but not identical) clause in notate_homedir_nobodyfiles
## Record homedir-relative paths of files owned by 'nobody'.
sysopen( my $nf_fh, "$work_dir/nobodyfiles", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 );
foreach my $file ( grep { $nfl_ref->{$_} eq 'nobody' } keys %$nfl_ref ) {
chomp($file);
$file =~ s/^\Q$homedir\E\/?//g;
print {$nf_fh} $file . "\n";
}
close($nf_fh);
}
);
print "Done\n";
}
sub build_pkgtree {
    ## Creates the cpmove working-directory skeleton under $work_dir.
    ## Parent entries (e.g. 'mma') are listed before their children
    ## (e.g. 'mma/pub'), so the creation order below matters.
    my ($work_dir) = @_;
    if ( !-e $work_dir ) {
        mkdir( $work_dir, 0700 );
    }
    my @subdirs = (
        'cp',          'resellerconfig',     'logs',               'mysql-timestamps',
        'mysql',       'psql',               'mm',                 'mms',
        'mma',         'mma/pub',            'mma/priv',           'va',
        'vad',         'fp',                 'fp/sites',           'interchange',
        'httpfiles',   'vf',                 'cron',               'sslcerts',
        'sslkeys',     'dnszones',           'counters',           'bandwidth',
        'suspended',   'suspendinfo',        'userdata',           'meta',
        'domainkeys',  'domainkeys/private', 'domainkeys/public',  'locale',
    );
    for my $subdir (@subdirs) {
        my $path = "$work_dir/$subdir";
        mkdir( $path, 0700 ) if !-e $path;
    }
}
sub get_signal_num_from_name {
    ## Resolves a USR* signal name to its number on this platform, caching
    ## the mapping in the caller-supplied scalar/hash refs (these were
    ## formerly globals).  Only names matching /USR/ are cached, so other
    ## signal names return undef.
    my ( $wanted_name, $sr_loaded_signals, $hr_signo ) = @_;
    if ( !${$sr_loaded_signals} ) {
        require Config;
        my @sig_names = split ' ', $Config::Config{'sig_name'};
        for my $idx ( 0 .. $#sig_names ) {
            ## Position in sig_name is the signal number.
            $hr_signo->{ $sig_names[$idx] } = $idx if $sig_names[$idx] =~ /USR/;
        }
        ${$sr_loaded_signals} = 1;
    }
    return $hr_signo->{$wanted_name};
}
##
## Deprecated as we no longer need it
##
#sub quickrun_stdin {
# my $bin = shift;
# my $stdin = shift;
#
# my $output;
# my $stdinpid = IPC::Open3::open3( my $wtrstdin, my $rdrstdin, ">&STDERR", $bin );
# print {$wtrstdin} $stdin . "\n";
# close($wtrstdin);
# while ( readline($rdrstdin) ) {
# $output .= $_;
# }
# close($rdrstdin);
# sigsafe_blocking_waitpid( $stdinpid );
# return $output;
#}
sub create_antitimeout_process {
    ## Forks a background child that prints a row of dots every ~1.5s
    ## (15 ticks x dotsleep()'s 0.1s) so remote callers watching our output
    ## do not time out during long operations.  Returns the child's pid for
    ## the caller to kill when done, or undef when fork() fails.
    my $dotpid = fork();
    if ( !defined $dotpid ) {
        ## Fix: previously a failed fork() fell through into the child
        ## branch, leaving the *parent* spinning in the dot loop forever.
        ## Callers already guard with "$dotpid && $dotpid > 0", so undef
        ## is handled safely.
        Cpanel::Logger::warn("Failed to fork anti-timeout process: $!\n");
        return;
    }
    if ( !$dotpid ) {
        my $dotcount = 5;
        while (1) {
            if ( $dotcount % 15 == 0 ) {
                print ".........\n";
            }
            dotsleep();
            $dotcount++;
        }
    }
    return $dotpid;
}
# Not used anymore
#sub nooutputsystemsh {
# my (@cmd) = @_;
# my $pid;
# if ( $pid = fork() ) {
#
# #master
# }
# else {
# open( STDIN, "<", "/dev/null" );
# open( STDOUT, ">", "/dev/null" );
# open( STDERR, ">", "/dev/null" );
# exec(@cmd);
#
# exit 1; #prevent fork bomb on failure
# }
# sigsafe_blocking_waitpid( $pid );
#}
sub find_mount {
    ## Returns the mount point from @$partition_list that prefixes
    ## $directory, preferring the longest (most specific) match.
    ## Falls back to '/' when nothing matches.
    my ( $partition_list, $directory ) = @_;
    $directory =~ tr{/}{/}s;    # squeeze runs of slashes down to one
    my @longest_first = sort { length $b->{'mount'} <=> length $a->{'mount'} } @{$partition_list};
    for my $partition (@longest_first) {
        return $partition->{'mount'}
            if Cpanel::StringFunc::Match::beginmatch( $directory, $partition->{'mount'} );
    }
    return '/';
}
## e.g. invoked as './scripts/pkgacct $user "" --split'
## Tars the work directory into fixed-size ".partNNNNN" files inside
## "${work_dir}-split", then prints each split file's name, md5sum, and
## size in the "splitpkgacctfile is: ..." format consumers parse.
sub handle_dir_to_splitfiles {
my ( $work_dir, $prefix_user, $system, $tarcfg, $compressflag, $archiveext ) = @_;
my $basedir = "${work_dir}-split";
mkdir( $basedir, 0700 );
## Move the work dir under the split dir so tar archives the prefixed name.
rename( $work_dir, "$basedir/$prefix_user" );
chdir($basedir);
opendir( SPD, $basedir );
my @FILES = readdir(SPD);
closedir(SPD);
## Remove leftover split files from any previous run (plain files only).
foreach my $file (@FILES) {
if ( -f "$basedir/${file}" ) {
unlink("$basedir/${file}");
}
}
## Print dots while tar/split and md5sum run, so callers don't time out.
my $dotpid = create_antitimeout_process();
my $rv = _split_dir( $basedir, $prefix_user, $tarcfg->{'bin'}, $system, $compressflag, $archiveext );
print "\n";
opendir( SPD, $basedir );
@FILES = ();
@FILES = readdir(SPD);
closedir(SPD);
for ( 0 .. $#FILES ) {
my $file = $FILES[$_];
next if ( $file !~ /^\Q$prefix_user\E/ ); #in case of cruft files
my $splitfile = "$basedir/$file";
if ( -f $splitfile ) {
print "splitpkgacctfile is: $splitfile\n";
my $md5sum = Cpanel::MD5::getmd5sum($splitfile);
## The dot printer is stopped before emitting the md5sum/size lines so
## dots don't interleave with the parsed output.
## NOTE(review): the child is killed with signal 8 then 9 -- presumably
## any fatal signal followed by KILL would do; confirm before changing.
if ( $dotpid && $dotpid > 0 ) {
kill( 8, $dotpid );
kill( 9, $dotpid );
}
print "\nsplitmd5sum is: $md5sum\n";
my $splitsize = ( stat($splitfile) )[7];
print "\nsplitsize is: $splitsize\n";
## Restart the dot printer unless this was the last directory entry.
if ( $_ != $#FILES ) {
$dotpid = create_antitimeout_process();
}
}
}
if ( $dotpid && $dotpid > 0 ) {
kill( 8, $dotpid );
kill( 9, $dotpid );
}
## Keep dots flowing while the (potentially large) source tree is removed.
$dotpid = create_antitimeout_process();
if ( -d "$basedir/$prefix_user"
&& !-l "$basedir/$prefix_user" ) {
system( "rm", "-rf", "$basedir/$prefix_user" );
}
if ( $dotpid && $dotpid > 0 ) {
kill( 8, $dotpid );
kill( 9, $dotpid );
}
return;
}
## Streams "tar pc<flags>" of $dir2tar through a pipe and splits the
## archive stream into "<dir2tar>.<archiveext>.partNNNNN" files, starting a
## new part whenever the running byte count exceeds $splitfile_partsize
## (a file-level variable declared above this chunk -- TODO confirm value).
sub _split_dir {
my ( $basedir, $dir2tar, $tarcmd, $system, $compressflag, $archiveext ) = @_;
chdir($basedir); ## TODO: get rid of the working directory assumption
my $TARPIPE;
## FreeBSD tar needs an explicit "-f -" to write the archive to stdout.
if ( $system =~ /freebsd/i ) {
open( $TARPIPE, "-|" ) || exec( $tarcmd, "pc${compressflag}f", "-", $dir2tar );
}
else {
open( $TARPIPE, "-|" ) || exec( $tarcmd, "pc${compressflag}", $dir2tar );
}
## $part starts at 0 to detect the first time through the loop (immediately incr'ed)
my $part = 0;
my $bytes = 0;
my $PART;
while ( my $bytes_read = sysread( $TARPIPE, my $bf, 65_535 ) ) {
$bytes += $bytes_read;
## do this the first time through and when buffer is full
if ( !$part || ( $bytes > $splitfile_partsize ) ) {
## only close when not the first time through
if ($part) {
close($PART);
}
$bytes = 0;
$part++;
my $fname = sprintf( "%s.%s.part%05d", $dir2tar, $archiveext, $part );
open( $PART, '>', $fname );
chmod( 0600, $fname );
}
## NOTE(review): syswrite can return undef on error or write fewer bytes
## than requested; only the zero-byte case is treated as out-of-disk here.
my $bytes_write = syswrite( $PART, $bf );
if ( $bytes_write == 0 ) {
## case 20142: out of disk space on syswrite
print "\nERROR: tar of split archive ran out of space\n";
last;
}
}
close($TARPIPE);
close($PART);
return;
}
sub export_non_cpanel_locale {
    ## Exports the user's configured locale to "$dest/locale/<locale>.xml"
    ## when it is installed on this system but not provided by cPanel, so
    ## the destination server can import it.  $user_file (the cpuser data
    ## hashref) is loaded on demand when not supplied.
    my ( $user, $dest, $user_file ) = @_;
    if ( !defined $user_file ) {
        $user_file = Cpanel::Config::LoadCpUserFile::loadcpuserfile($user);
        if ( !scalar keys %{$user_file} ) {
            print "\nERROR: unable to load cPanel user data\n";
            return;
        }
    }
    my $user_locale = $user_file->{'LOCALE'};
    my $lh = Cpanel::Locale->get_handle(); #issafe #nomunge
    my $is_installed = grep { $user_locale eq $_ } Cpanel::Locale::Utils::Display::get_locale_list($lh); #issafe #nomunge
    if ( !exists $Cpanel::Locale::Utils::3rdparty::cpanel_provided{$user_locale} && $is_installed ) { #issafe #nomunge
        print "Copying locale ...";
        system( '/scripts/locale_export', '--quiet', "--locale=$user_locale", "--export-${user_locale}=$dest/locale/${user_locale}.xml" );
        print "Done\n";
    }
}
sub process_args {
    ## Parses pkgacct's positional/flag arguments.
    ## Returns ( $user, $tarroot, \%OPTS, $new_mysql_version ).
    ## Fix: missing trailing arguments no longer trigger "uninitialized
    ## value" warnings; returned truth values are unchanged.
    my (@argv) = @_;
    my %OPTS;

    # We always do archive version 3 now unless we are doing an incremental backup
    $OPTS{'archive_version'} = 3;

    ## '--version' is consumed along with its value argument.
    if ( defined $argv[0] && $argv[0] eq '--version' ) {
        splice( @argv, 0, 2 );
    }

    $OPTS{'skiphomedir'} = 0;
    if ( defined $argv[0] && $argv[0] eq '--skiphomedir' ) {
        $OPTS{'skiphomedir'} = 1;
        shift(@argv);
    }

    my $user    = $argv[0];
    my $tarroot = $argv[1];

    my $mode     = defined $argv[2] ? $argv[2] : '';
    my $compress = defined $argv[3] ? $argv[3] : '';

    ## from scripts/cpbackup
    $OPTS{'backup'} = ( $mode eq "backup" );

    ## from bin/backupadmin.pl
    $OPTS{'userbackup'} = ( $mode eq "userbackup" );

    ## set when called from whm5
    $OPTS{'split'} = ( $mode =~ /split/i );

    $OPTS{'nocompress'} = ( $compress =~ /nocompress/i );

    my ($new_mysql_version);
    if ( defined $argv[4] && $argv[4] =~ /mysql/i ) {
        $new_mysql_version = $argv[5];
    }
    return ( $user, $tarroot, \%OPTS, $new_mysql_version );
}
## when invoked as "/scripts/pkgacct --skiphomedir $user"
sub notate_homedir_nobodyfiles {
    ## Writes the homedir-relative path of every file under $homedir owned
    ## by 'nobody' into "$work_dir/nobodyfiles", one per line.
    my ( $homedir, $work_dir ) = @_;
    my $uid_map = Cpanel::SafeSync::find_uid_files( $homedir, ['nobody'] );
    sysopen( my $list_fh, "$work_dir/nobodyfiles", &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 );
    for my $path ( keys %{$uid_map} ) {
        chomp($path);
        $path =~ s{^\Q$homedir\E/?}{}g;
        print {$list_fh} "$path\n";
    }
    close($list_fh);
    return;
}
sub _db_needs_backup {
    ## For incremental runs, decides whether a database must be dumped again.
    ## Returns: 1 = must back up, 2 = no prior dump file exists, 0 = current.
    ## ($is_incremental_backup is a file-level variable set earlier.)
    return 1 if !$is_incremental_backup;
    my ( $db_type, $db_name, $mysqldatadir, $target_sql_file, $sql_files, $update_times_cache_ref ) = @_;
    # $mysqldatadir is accepted for the call signature but unused.
    # Eventually we could support postgres if we had a fast way to check
    # when the database was last updated.
    for my $sql_file ( @{$sql_files} ) {
        return 2 if !-e $sql_file;
        return 1 if -z _;    # a zero-byte dump indicates a failed prior backup
    }
    ## Compare the existing dump's mtime against the database's last-update
    ## time; future-dated timestamps force a fresh dump.
    my $backup_mtime = ( stat($target_sql_file) )[9];
    my $now = time();
    return 1 if !$backup_mtime || $backup_mtime > $now;
    my $last_changed =
        ( exists $update_times_cache_ref->{$db_name} && $update_times_cache_ref->{$db_name} )
        ? $update_times_cache_ref->{$db_name}
        : Cpanel::SafeRun::Simple::saferun( '/usr/local/cpanel/bin/' . $db_type . 'tool', 'LASTUPDATETIME', $db_name );
    chomp($last_changed);
    return 1
        if !$last_changed
        || $last_changed > $backup_mtime
        || $last_changed > $now;
    my $changed_at = localtime($last_changed);
    my $backed_at  = localtime($backup_mtime);
    print "$db_name skipped (last change @ $changed_at, current backup @ $backed_at)\n";
    return 0;    # no need to back up again
}
sub _file_needs_backup {
    ## For incremental runs, decides whether $source_file has changed since
    ## $target_file (the existing backup copy) was written.  Returns 1 when
    ## a fresh copy is needed, 0 when the backup is current.
    ## ($is_incremental_backup is a file-level variable set earlier.)
    return 1 if !$is_incremental_backup;
    my ( $source_file, $target_file, $name ) = @_;
    ## A zero-byte target indicates a previously failed backup.
    return 1 if -z $target_file;
    my $target_mtime = ( stat(_) )[9];    # reuses the stat buffer from -z above
    my $source_mtime = ( stat($source_file) )[9];
    my $now          = time();
    return 1
        if !$source_mtime
        || $source_mtime > $target_mtime
        || $source_mtime > $now;
    my $changed_at = localtime($source_mtime);
    my $backed_at  = localtime($target_mtime);
    $name ||= $source_file;
    print "$name skipped (last change @ $changed_at, current backup @ $backed_at)\n";
    return 0;    # no need to back up again
}
## Runs $code in a forked child while the parent prints "........." dots
## (roughly every 5s) so watching processes do not time out.  The wait loop
## exits promptly when the child finishes because the CHLD handler writes a
## byte into a self-pipe, which pops the select() below immediately.
sub run_dot_event { #uses a self pipe to finish instantly
my $code = shift;
# Setup a pipe so we can write a zero to the parent on SIGCHLD
# (see the select on $rin below) This will cause our select
# below to trigger when the signal is received (usually a child exit).
my ( $read_handle, $write_handle );
pipe( $read_handle, $write_handle );
_setfhnonblock($read_handle);
_setfhnonblock($write_handle);
local $SIG{'CHLD'} = sub {
syswrite( $write_handle, '0', 1 );
};
my ( $rin, $nfound );
vec( $rin, fileno($read_handle), 1 ) = 1;
my $original_pname = $0;
if ( my $pid = fork() ) {
$0 = $original_pname . ' - waiting for subprocess: ' . $pid;
my $buffer;
## *** Please see case 44803 before making any changes below
## We previously saw waitpid($pid,0) finishing before we expected
## when SIGSTOP/SIGCONT is sent to the process group. ***
## Non-blocking waitpid (flag 1, i.e. WNOHANG) polled in a loop; the
## select() below provides the ~5s pacing between polls.
until ( ( my $child = waitpid( $pid, 1 ) ) == $pid ) {
last if $child == -1;
print ".........\n";
## select with a timeout of 5s for the printed dots
## select on $rin, checking for a true value, as the syswrite
## on SIGCHLD will pop out of the select immediately
if ( $nfound = select( $rin, undef, undef, 5 ) ) {
# flush out the '0's we got from the pipe
# when there is data on the pipe (from CHLD handler)
read( $read_handle, $buffer, 4096 );
}
}
}
else {
$0 = $original_pname . ' - subprocess';
local $SIG{'CHLD'} = 'DEFAULT'; #very important or we will pollute the parent
## The child's exit status is $code's return value.
exit( $code->() );
}
$0 = $original_pname;
}
sub _setfhnonblock {
    ## Puts the supplied filehandle into non-blocking mode by OR-ing
    ## O_NONBLOCK into its current fcntl flags.  Failures are logged as
    ## warnings; execution continues either way.
    my ($fh) = @_;
    my $flags = fcntl( $fh, $Cpanel::Hulk::Constants::F_GETFL, 0 ); #issafe #nomunge
    if ( !$flags ) {
        Cpanel::Logger::warn("Can't get the fcntl flags on $fh: $!\n");
    }
    fcntl( $fh, $Cpanel::Hulk::Constants::F_SETFL, $flags | $Cpanel::Hulk::Constants::O_NONBLOCK ) #issafe #nomunge
        or Cpanel::Logger::warn("Can't fcntl $fh to non-blocking: $!\n");
}
sub _get_matching_files {
    ## Returns an arrayref of "$dir/$name" paths for every entry of $dir
    ## whose name matches $regex ('.' and '..' are always excluded).
    ## Returns nothing (undef in scalar context) and prints a message when
    ## $regex does not compile.
    my ( $dir, $regex ) = @_;
    my $pattern = eval { qr/$regex/ };
    if ( !$pattern ) {
        print "Failed to compile regex $regex\n";
        return;
    }
    opendir( my $dir_h, $dir );
    my @matches;
    while ( defined( my $entry = readdir($dir_h) ) ) {
        next if $entry =~ /^\.\.?$/;
        push @matches, "$dir/$entry" if $entry =~ $pattern;
    }
    closedir($dir_h);
    return \@matches;
}
sub _simple_exec_into_file {
    ## Runs @$cmdref with its stdout redirected into $file (created 0600,
    ## truncated, symlinks refused via O_NOFOLLOW) and blocks until the
    ## command finishes.  Does nothing when the file cannot be opened.
    my ( $file, $cmdref ) = @_;
    my $opened = sysopen( my $out_fh, $file, &Fcntl::O_WRONLY | &Fcntl::O_CREAT | &Fcntl::O_NOFOLLOW | &Fcntl::O_TRUNC, 0600 );
    return if !$opened;
    my $pid = fork();
    if ($pid) {
        sigsafe_blocking_waitpid($pid);
    }
    else {
        ## Child: '>&=' dups the open descriptor onto STDOUT, then execs.
        open( STDOUT, '>&=' . fileno($out_fh) );
        exec( @{$cmdref} );
        exit 1;    # prevent fork bomb on failure to exec()
    }
    close($out_fh);
}
#
# Runs an adminbin with the multipart backup mode and parses its sectioned
# output into a hashref of SECTION-NAME => accumulated text.
# Example output:
#-- cPanel BEGIN PING
#PONG
#-- cPanel END PING
#-- cPanel BEGIN LISTDBS
#mydbs1
#-- cPanel END LISTDBS
#-- cPanel BEGIN DUMPSQL_USERS
#-- cPanel END DUMPSQL_USERS
#-- cPanel BEGIN DUMPSQL_GRANTS
#-- cPanel END DUMPSQL_GRANTS
#
#
sub _run_admin_backupcmd {
    my @cmd = @_;
    my ( %data, $prog_fh, $command, $position );
    ## Pipe-open: parent reads the child's stdout; child execs @cmd.
    my $pid = open( $prog_fh, '-|' );
    if ( !defined $pid ) {
        ## Fix: previously a failed pipe-open (undef, no child forked) fell
        ## through to the child branch and exec()ed @cmd in *this* process,
        ## replacing pkgacct entirely.
        warn 'Error while executing: [' . join( ' ', @cmd ) . ']: ' . $!;
        return \%data;
    }
    if ( !$pid ) {
        ( $ENV{'PATH'} ) = $ENV{'PATH'} =~ m/(.*)/; # untaint, case 6622
        exec(@cmd);
        exit(1);    # prevent fork bomb on failure to exec()
    }
    while ( my $line = readline($prog_fh) ) {
        if ( $line =~ /^-- cPanel/ ) {
            ## "-- cPanel BEGIN FOO" opens section FOO; "... END FOO" closes it.
            ( $position, $command ) = ( $line =~ /^-- cPanel (\S+) (\S+)/ );
            $command = '' if $position eq 'END';
            next;
        }
        elsif ($command) {
            $data{$command} .= $line;
        }
        else {
            ## Data outside any BEGIN/END section is unexpected.
            warn "Unknown data from [" . join( " ", @cmd ) . "]: $line";
        }
    }
    close($prog_fh);
    sigsafe_blocking_waitpid($pid);
    return \%data;
}
sub sigsafe_blocking_waitpid {
    ## waitpid() can return early when interrupted by a signal; keep
    ## waiting until the child is actually reaped, or waitpid reports -1
    ## (no such child).
    my ($pid) = @_;
    while (1) {
        my $reaped = waitpid( $pid, 0 );
        last if $reaped == $pid || $reaped == -1;
    }
}
1;