diff --git a/fhem/CHANGED b/fhem/CHANGED index d5d5641b9..141430aff 100644 --- a/fhem/CHANGED +++ b/fhem/CHANGED @@ -1,5 +1,7 @@ # Add changes at the top of the list. Keep it in ASCII, and 80-char wide. # Do not insert empty lines here, update check depends on it. + - feature: 93_DbRep: V7.12.0, compression of dumpfile, restore of compressed + files possible - change: 77_SMAEM: V3.1.0, extend error handling in define -bugfix: 73_ElectricityCalculator: Min dt for calc. reduced from 30 to 1 s - feature: 93_DbRep: V7.11.0, "repairSQLite" to repair a corrupted SQLite DB diff --git a/fhem/FHEM/93_DbRep.pm b/fhem/FHEM/93_DbRep.pm index 344f6ef8c..b24512851 100644 --- a/fhem/FHEM/93_DbRep.pm +++ b/fhem/FHEM/93_DbRep.pm @@ -37,6 +37,7 @@ ########################################################################################################################### # Versions History: # +# 7.12.0 16.02.2018 compression of dumpfile, restore of compressed files possible # 7.11.0 12.02.2018 new command "repairSQLite" to repair a corrupted SQLite database # 7.10.0 10.02.2018 bugfix delete attr timeYearPeriod if set other time attributes, new "changeValue" command # 7.9.0 09.02.2018 new attribute "avgTimeWeightMean" (time weight mean calculation), code review of selection @@ -104,8 +105,8 @@ # commandref revised, minor fixes # 5.8.6 30.10.2017 don't limit attr reading, device if the attr contains a list # 5.8.5 19.10.2017 filter unwanted characters in "procinfo"-result -# 5.8.4 17.10.2017 createSelectSql, createDeleteSql, currentfillup_Push switch to devspec -# 5.8.3 16.10.2017 change to use createSelectSql: minValue,diffValue - createDeleteSql: delEntries +# 5.8.4 17.10.2017 createSelectSql, DbRep_createDeleteSql, currentfillup_Push switch to devspec +# 5.8.3 16.10.2017 change to use createSelectSql: minValue,diffValue - DbRep_createDeleteSql: delEntries # 5.8.2 15.10.2017 sub DbRep_createTimeArray # 5.8.1 15.10.2017 change to use createSelectSql: sumValue,averageValue,exportToFile,maxValue # 5.8.0 15.10.2017 adapt createSelectSql for better performance if time/aggregation not set, @@ -307,13 +308,15 @@ use Blocking; use Color; # colorpicker Widget use Time::Local; use Encode qw(encode_utf8); +use IO::Compress::Gzip qw(gzip $GzipError); +use IO::Uncompress::Gunzip qw(gunzip $GunzipError); # no if $] >= 5.018000, warnings => 'experimental'; no if $] >= 5.017011, warnings => 'experimental::smartmatch'; sub DbRep_Main($$;$); sub DbLog_cutCol($$$$$$$); # DbLog-Funktion nutzen um Daten auf maximale Länge beschneiden -my $DbRepVersion = "7.11.0"; +my $DbRepVersion = "7.12.0"; my %dbrep_col = ("DEVICE" => 64, "TYPE" => 64, @@ -343,6 +346,7 @@ sub DbRep_Initialize($) { "averageCalcForm:avgArithmeticMean,avgDailyMeanGWS,avgTimeWeightMean ". "device " . "dumpComment ". + "dumpCompress:1,0 ". "dumpDirLocal ". "dumpDirRemote ". "dumpMemlimit ". 
@@ -3570,9 +3574,9 @@ sub del_DoParse($) { # SQL zusammenstellen für DB-Operation if ($IsTimeSet || $IsAggrSet) { - $sql = createDeleteSql($hash,$table,$device,$reading,$runtime_string_first,$runtime_string_next,''); + $sql = DbRep_createDeleteSql($hash,$table,$device,$reading,$runtime_string_first,$runtime_string_next,''); } else { - $sql = createDeleteSql($hash,$table,$device,$reading,undef,undef,''); + $sql = DbRep_createDeleteSql($hash,$table,$device,$reading,undef,undef,''); } $sth = $dbh->prepare($sql); @@ -6211,6 +6215,14 @@ sub mysql_DoDumpClientSide($) { # SQL-Laufzeit ermitteln my $rt = tv_interval($st); + # Dumpfile komprimieren wenn dumpCompress=1 + my $compress = AttrVal($name,"dumpCompress",0); + if($compress) { + # $err nicht auswerten -> wenn compress fehlerhaft wird unkomprimiertes dumpfile verwendet + ($err,$backupfile) = DbRep_dumpCompress($hash,$backupfile); + $filesize = (stat("$dump_path$backupfile"))[7]; + } + # Dumpfile per FTP senden und versionieren my ($ftperr,$ftpmsg,@ftpfd) = DbRep_sendftp($hash,$backupfile); my $ftp = $ftperr?encode_base64($ftperr,""):$ftpmsg?encode_base64($ftpmsg,""):0; @@ -6233,7 +6245,7 @@ sub mysql_DoDumpClientSide($) { Log3 ($name, 3, "DbRep $name - Finished backup of database $dbname, total time used: ".sprintf("%.0f",$brt)." sec."); Log3 ($name, 4, "DbRep $name -> BlockingCall mysql_DoDumpClientSide finished"); -return "$name|$rt|''|$sql_file|$drc|$drh|$fsize|$ftp|$bfd|$ffd"; +return "$name|$rt|''|$dump_path$backupfile|$drc|$drh|$fsize|$ftp|$bfd|$ffd"; } #################################################################################################### @@ -6365,6 +6377,13 @@ sub mysql_DoDumpServerSide($) { # SQL-Laufzeit ermitteln my $rt = tv_interval($st); + # Dumpfile komprimieren wenn dumpCompress=1 + my $compress = AttrVal($name,"dumpCompress",0); + if($compress) { + # $err nicht auswerten -> wenn compress fehlerhaft wird unkomprimiertes dumpfile verwendet + ($err,$bfile) = DbRep_dumpCompress($hash,$bfile); + } + # Größe Dumpfile ermitteln ("dumpDirRemote" muß auf "dumpDirLocal" gemountet sein) my $dump_path_def = $attr{global}{modpath}."/log/"; my $dump_path_loc = AttrVal($name,"dumpDirLocal", $dump_path_def); @@ -6490,6 +6509,13 @@ sub sqlite_DoDump($) { # SQL-Laufzeit ermitteln my $rt = tv_interval($st); + + # Dumpfile komprimieren + my $compress = AttrVal($name,"dumpCompress",0); + if($compress) { + # $err nicht auswerten -> wenn compress fehlerhaft wird unkomprimiertes dumpfile verwendet + ($err,$bfile) = DbRep_dumpCompress($hash,$bfile); + } # Größe Dumpfile ermitteln my @a = split(' ',qx(du $dump_path$bfile)) if ($^O =~ m/linux/i || $^O =~ m/unix/i); @@ -6737,18 +6763,18 @@ return; #################################################################################################### sub sqlite_Restore ($) { my ($string) = @_; - my ($name, $bfile) = split("\\|", $string); - my $hash = $defs{$name}; - my $dbloghash = $hash->{dbloghash}; - my $dbconn = $dbloghash->{dbconn}; - my $dbuser = $dbloghash->{dbuser}; - my $dblogname = $dbloghash->{NAME}; - my $dbpassword = $attr{"sec$dblogname"}{secret}; - my $dump_path_def = $attr{global}{modpath}."/log/"; - my $dump_path = AttrVal($name, "dumpDirLocal", $dump_path_def); - $dump_path = $dump_path."/" unless($dump_path =~ m/\/$/); - my $ebd = AttrVal($name, "executeBeforeProc", undef); - my $ead = AttrVal($name, "executeAfterProc", undef); + my ($name,$bfile) = split("\\|", $string); + my $hash = $defs{$name}; + my $dbloghash = $hash->{dbloghash}; + my $dbconn = $dbloghash->{dbconn}; + 
my $dbuser = $dbloghash->{dbuser}; + my $dblogname = $dbloghash->{NAME}; + my $dbpassword = $attr{"sec$dblogname"}{secret}; + my $dump_path_def = $attr{global}{modpath}."/log/"; + my $dump_path = AttrVal($name, "dumpDirLocal", $dump_path_def); + $dump_path = $dump_path."/" unless($dump_path =~ m/\/$/); + my $ebd = AttrVal($name, "executeBeforeProc", undef); + my $ead = AttrVal($name, "executeAfterProc", undef); my ($dbh,$err,$dbname); Log3 ($name, 4, "DbRep $name -> Start BlockingCall sqlite_Restore"); @@ -6775,6 +6801,17 @@ sub sqlite_Restore ($) { } $dbname = (split /[\/]/, $dbname)[-1]; + + # Dumpfile dekomprimieren wenn gzip + if($bfile =~ m/\.gzip$/) { + ($err,$bfile) = DbRep_dumpUnCompress($hash,$bfile); + if ($err) { + $err = encode_base64($err,""); + Log3 ($name, 4, "DbRep $name -> BlockingCall sqlite_Restore finished"); + $dbh->disconnect; + return "$name|''|$err|''|''"; + } + } Log3 ($name, 3, "DbRep $name - Starting restore of database '$dbname'"); @@ -6838,6 +6875,17 @@ sub mysql_RestoreServerSide($) { return "$name|''|$err|''|''"; } + # Dumpfile dekomprimieren wenn gzip + if($bfile =~ m/\.gzip$/) { + ($err,$bfile) = DbRep_dumpUnCompress($hash,$bfile); + if ($err) { + $err = encode_base64($err,""); + Log3 ($name, 4, "DbRep $name -> BlockingCall mysql_RestoreServerSide finished"); + $dbh->disconnect; + return "$name|''|$err|''|''"; + } + } + Log3 ($name, 3, "DbRep $name - Starting restore of database '$dbname', table '$table'."); # SQL-Startzeit @@ -7107,7 +7155,7 @@ return $sql; #################################################################################################### # SQL-Statement zusammenstellen für Löschvorgänge #################################################################################################### -sub createDeleteSql($$$$$$$) { +sub DbRep_createDeleteSql($$$$$$$) { my ($hash,$table,$device,$reading,$tf,$tn,$addon) = @_; my $name = $hash->{NAME}; my $dbmodel = $hash->{dbloghash}{MODEL}; @@ -7748,9 +7796,9 @@ sub DbRep_deldumpfiles ($$) { my $dump_path_def = $attr{global}{modpath}."/log/"; my $dump_path_loc = AttrVal($name,"dumpDirLocal", $dump_path_def); my $dfk = AttrVal($name,"dumpFilesKeep", 3); - my $pfix = (split '\.', $bfile)[-1]; + my $pfix = (split '\.', $bfile)[1]; my $dbname = (split '_', $bfile)[0]; - my $file = $dbname."_.*".$pfix; + my $file = $dbname."_.*".$pfix.".*"; # Files mit/ohne Endung "gzip" berücksichtigen my @fd; if(!opendir(DH, $dump_path_loc)) { @@ -7775,13 +7823,73 @@ return @fd; } +#################################################################################################### +# Dumpfile komprimieren +#################################################################################################### +sub DbRep_dumpCompress ($$) { + my ($hash,$bfile) = @_; + my $name = $hash->{NAME}; + my $dump_path_def = $attr{global}{modpath}."/log/"; + my $dump_path_loc = AttrVal($name,"dumpDirLocal", $dump_path_def); + $dump_path_loc =~ s/(\/$|\\$)//; + my $input = $dump_path_loc."/".$bfile; + my $output = $dump_path_loc."/".$bfile.".gzip"; + + Log3($name, 3, "DbRep $name - compress file $input"); + + my $stat = gzip $input => $output ,BinModeIn => 1; + if($GzipError) { + Log3($name, 2, "DbRep $name - gzip of $input failed: $GzipError"); + return ($GzipError,$input); + } + + Log3($name, 3, "DbRep $name - file compressed to output file: $output"); + unlink("$input"); + Log3($name, 3, "DbRep $name - input file deleted: $input"); + +return (undef,$bfile.".gzip"); +} + 
+#################################################################################################### +# Dumpfile dekomprimieren +#################################################################################################### +sub DbRep_dumpUnCompress ($$) { + my ($hash,$bfile) = @_; + my $name = $hash->{NAME}; + my $dump_path_def = $attr{global}{modpath}."/log/"; + my $dump_path_loc = AttrVal($name,"dumpDirLocal", $dump_path_def); + $dump_path_loc =~ s/(\/$|\\$)//; + my $input = $dump_path_loc."/".$bfile; + my $outfile = $bfile; + $outfile =~ s/\.gzip//; + my $output = $dump_path_loc."/".$outfile; + + Log3($name, 3, "DbRep $name - uncompress file $input"); + + my $stat = gunzip $input => $output ,BinModeOut => 1; + if($GunzipError) { + Log3($name, 2, "DbRep $name - gunzip of $input failed: $GunzipError"); + return ($GunzipError,$input); + } + + Log3($name, 3, "DbRep $name - file uncompressed to output file: $output"); + + # Größe dekomprimiertes File ermitteln + my @a = split(' ',qx(du $output)) if ($^O =~ m/linux/i || $^O =~ m/unix/i); + + my $filesize = ($a[0])?($a[0]*1024):undef; + my $fsize = DbRep_byteOutput($filesize); + Log3 ($name, 3, "DbRep $name - Size of uncompressed file: ".$fsize); + +return (undef,$outfile); +} + #################################################################################################### # erzeugtes Dump-File aus dumpDirLocal zum FTP-Server übertragen #################################################################################################### sub DbRep_sendftp ($$) { my ($hash,$bfile) = @_; my $name = $hash->{NAME}; - my $dbloghash = $hash->{dbloghash}; my $dump_path_def = $attr{global}{modpath}."/log/"; my $dump_path_loc = AttrVal($name,"dumpDirLocal", $dump_path_def); my $file = (split /[\/]/, $bfile)[-1]; @@ -7796,9 +7904,9 @@ sub DbRep_sendftp ($$) { my $ftpPassive = AttrVal($name,"ftpPassive",0); my $ftpDebug = AttrVal($name,"ftpDebug",0); my $fdfk = AttrVal($name,"ftpDumpFilesKeep", 3); - my $pfix = (split '\.', $bfile)[-1]; + my $pfix = (split '\.', $bfile)[1]; my $dbname = (split '_', $bfile)[0]; - my $ftpl = $dbname."_.*".$pfix; + my $ftpl = $dbname."_.*".$pfix.".*"; # Files mit/ohne Endung "gzip" berücksichtigen my ($ftperr,$ftpmsg,$ftp); # kein FTP verwenden oder möglich @@ -8348,6 +8456,8 @@ return; Scalar::Util
DBI
Color (FHEM-module)
+ IO::Compress::Gzip
+ IO::Uncompress::Gunzip
Blocking (FHEM-module)

Due to performance reasons the following index should be created in addition:
@@ -8603,11 +8713,12 @@ return;
  • dumpMySQL [clientSide | serverSide] - creates a dump of the connected MySQL database.
    - Depended from selected option the dump will be created on Client- or on Serv-Side.
    + Depending on the selected option the dump will be created on the client or on the server side.
    The variants differ in the executing system, the location where the dump is created, the usage of attributes, the function result and the needed hardware resources.
    The option "clientSide" e.g. needs more powerful FHEM-Server hardware, but saves all available - tables inclusive possibly created views. + tables inclusive possibly created views.
    + With attribute "dumpCompress" a compression of dump file after creation can be switched on.

    to restore the database from the dump.


    + Option serverSide
    The dump will be created on the MySQL-Server and will be saved in its home directory @@ -8660,10 +8785,22 @@ return; To prevent FHEM from blocking, you have to operate DbLog in asynchronous mode if the table optimization is to be used!

    - After the dump a FHEM-command can be executed as well (see attribute "executeAfterProc").
    - - The attributes relevant for function "dumpMySQL serverSide" are "dumpDirRemote", "dumpDirLocal", - "dumpFilesKeep", "optimizeTablesBeforeDump", "executeBeforeProc" and "executeAfterProc".

    + After the dump a FHEM-command can be executed as well (see attribute "executeAfterProc").

    + + The attributes relevant for function "dumpMySQL serverSide" are:

    + + "dumpDirRemote", "dumpDirLocal", "dumpFilesKeep", "optimizeTablesBeforeDump", "executeBeforeProc", "executeAfterProc" and "dumpCompress".
    The target directory can be set by attribute "dumpDirRemote". It must be located on the MySQL-Host and has to be writable by the MySQL-server process.
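    For example (device name and path are illustrative; the directory must exist on the MySQL host):

        attr Rep.Backup dumpDirRemote /media/backup/fhem_dumps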
    @@ -8695,11 +8832,11 @@ return; directory "dumpDirLocal" (the mounted "dumpDirRemote"). In that case FHEM needs write permissions to the directory "dumpDirLocal".

    - The naming convention of dump files is: <dbname>_<date>_<time>.csv

    + The naming convention of dump files is: <dbname>_<date>_<time>.csv[.gzip]

    You can start a restore of table history from serverSide-Backup by command:
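        set <name> restoreMySQL <filename>.csv[.gzip]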



    @@ -8748,17 +8885,28 @@ return; optimization is to be used!

    Before and after the dump a FHEM-command can be executed (see attribute "executeBeforeProc", - "executeAfterProc").
    - - The attributes relevant for this function are "dumpDirLocal", "dumpFilesKeep", "executeBeforeProc", - "executeAfterProc" and "optimizeTablesBeforeDump".
    + "executeAfterProc").

    + + The attributes relevant for function "dumpSQLite" are:

    + + "dumpDirLocal", "dumpFilesKeep", "executeBeforeProc", "executeAfterProc", "optimizeTablesBeforeDump" and "dumpCompress".
    After a successfully finished dump the old dump files are deleted and only the number set by attribute "dumpFilesKeep" (default: 3) remains in the target directory "dumpDirLocal". If "dumpFilesKeep = 0" is set, all dump files (including the currently created one) are deleted. This setting can be helpful if FTP transmission is used and the created dumps should only be kept in the FTP destination directory.
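    A minimal Perl sketch of that retention step, assuming all dump files of a database lie in one directory and begin with the database name (the function and its names are illustrative, not the module's internals):

        use strict;
        use warnings;

        # keep only the $keep newest dump files of database $dbname in $dir
        sub keep_newest_dumps {
            my ($dir, $dbname, $keep) = @_;
            opendir(my $dh, $dir) or return;
            my @files = grep { /^\Q$dbname\E_/ && -f "$dir/$_" } readdir($dh);
            closedir($dh);
            # sort newest first by modification time
            my @sorted = sort { (stat("$dir/$b"))[9] <=> (stat("$dir/$a"))[9] } @files;
            # delete everything beyond the first $keep entries
            unlink map { "$dir/$_" } @sorted[$keep .. $#sorted] if @sorted > $keep;
            return;
        }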

    - The naming convention of dump files is: <dbname>_<date>_<time>.sqlitebkp

    + The naming convention of dump files is: <dbname>_<date>_<time>.sqlitebkp[.gzip]

    The database can be restored by command "set <name> restoreSQLite <filename>"
    The created dump file can be transferred to an FTP server. Please see explanations about FTP- @@ -8971,19 +9119,19 @@ return;

  • -
  • restoreMySQL <file>.csv - imports the content of table history from a serverSide-backup.
    +
  • restoreMySQL <file>.csv[.gzip] - imports the content of table history from a serverSide-backup.
    The function provides a drop-down list of files which can be used for restore. For that purpose you have to mount the remote directory "dumpDirRemote" of the MySQL server on the client and make it known to the DbRep device by setting the attribute "dumpDirLocal".
    - All files with extension "csv" and if the filename is beginning with the name of the connected database + All files with extension "csv[.gzip]" whose filename begins with the name of the connected database (see Internal DATABASE) are listed.
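    A selected ".gzip" file is decompressed before the restore runs (see DbRep_dumpUnCompress in the diff above); a minimal standalone sketch of that step, with an illustrative file name:

        use strict;
        use warnings;
        use IO::Uncompress::Gunzip qw(gunzip $GunzipError);

        my $bfile = "fhem_2018_02_16_10_00.csv.gzip";   # illustrative dump file name
        if ($bfile =~ m/\.gzip$/) {
            (my $outfile = $bfile) =~ s/\.gzip$//;
            # decompress; the restore then continues with the plain file
            gunzip $bfile => $outfile, BinModeOut => 1
                or die "gunzip of $bfile failed: $GunzipError";
            $bfile = $outfile;
        }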


  • -
  • restoreSQLite <File>.sqlitebkp - restores a backup of SQLite database.
    +
  • restoreSQLite <File>.sqlitebkp[.gzip] - restores a backup of SQLite database.
    The function provides a drop-down list of files which can be used for restore. The data stored in the current database are deleted or overwritten. - All files with extension "sqlitebkp" and if the filename is beginning with the name of the connected database + All files with extension "sqlitebkp[.gzip]" whose filename begins with the name of the connected database are listed.


  • @@ -9279,6 +9427,8 @@ return;
  • dumpComment - User-comment. It will be included in the header of the created dumpfile by command "dumpMySQL clientSide".

  • + +
  • dumpCompress - if set, the dump files created by "dumpMySQL" or "dumpSQLite" are compressed
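    This mirrors what the module does internally via IO::Compress::Gzip (see DbRep_dumpCompress in the diff above); a minimal sketch with an illustrative file name:

        use strict;
        use warnings;
        use IO::Compress::Gzip qw(gzip $GzipError);

        my $input  = "fhem_2018_02_16_10_00.sql";       # illustrative dump file name
        my $output = $input.".gzip";
        # compress the dump, then remove the uncompressed original
        gzip $input => $output, BinModeIn => 1
            or die "gzip of $input failed: $GzipError";
        unlink $input;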

  • dumpDirLocal - Target directory of database dumps by command "dumpMySQL clientSide" (default: "{global}{modpath}/log/" on the FHEM-Server).
    @@ -9835,6 +9985,8 @@ sub bdump { Scalar::Util
    DBI
    Color (FHEM-Modul)
    + IO::Compress::Gzip
    + IO::Uncompress::Gunzip
    Blocking (FHEM-Modul)

    Aus Performancegründen sollte zusätzlich folgender Index erstellt werden:
    @@ -10098,7 +10250,8 @@ sub bdump { Die Varianten unterscheiden sich hinsichtlich des ausführenden Systems, des Erstellungsortes, der Attributverwendung, des erzielten Ergebnisses und der benötigten Hardwareressourcen.
    Die Option "clientSide" benötigt z.B. eine leistungsfähigere Hardware des FHEM-Servers, sichert aber alle - Tabellen inklusive eventuell angelegter Views. + Tabellen inklusive eventuell angelegter Views.
    + Mit dem Attribut "dumpCompress" kann eine Komprimierung der erstellten Dumpfiles eingeschaltet werden.

    -
  • restoreMySQL <File>.csv - importiert den Inhalt der history-Tabelle aus einem serverSide-Backup.
    +
  • restoreMySQL <File>.csv[.gzip] - importiert den Inhalt der history-Tabelle aus einem serverSide-Backup.
    Die Funktion stellt über eine Drop-Down Liste eine Dateiauswahl für den Restore zur Verfügung. Dazu ist das Verzeichnis "dumpDirRemote" des MySQL-Servers auf dem Client zu mounten und im Attribut "dumpDirLocal" dem DbRep-Device bekannt zu machen.
    - Es werden alle Files mit der Endung "csv" und deren Name mit der + Es werden alle Files mit der Endung "csv[.gzip]" und deren Name mit dem Namen der verbundenen Datenbank beginnt (siehe Internal DATABASE), aufgelistet.


  • -
  • restoreSQLite <File>.sqlitebkp - stellt das Backup einer SQLite-Datenbank wieder her.
    +
  • restoreSQLite <File>.sqlitebkp[.gzip] - stellt das Backup einer SQLite-Datenbank wieder her.
    Die Funktion stellt über eine Drop-Down Liste die für den Restore verfügbaren Dateien bereit. Die aktuell in der Zieldatenbank enthaltenen Daten werden gelöscht bzw. überschrieben. - Es werden alle Files mit der Endung "sqlitebkp" und deren Name mit dem Namen der + Es werden alle Files mit der Endung "sqlitebkp[.gzip]" und deren Name mit dem Namen der verbundenen Datenbank beginnt, aufgelistet.


  • @@ -10782,6 +10976,8 @@ sub bdump {
  • dumpComment - User-Kommentar. Er wird im Kopf des durch den Befehl "dumpMySQL clientSide" erzeugten Dumpfiles eingetragen.

  • + +
  • dumpCompress - wenn gesetzt, werden die Dumpfiles nach "dumpMySQL" bzw. "dumpSQLite" komprimiert

  • dumpDirLocal - Zielverzeichnis für die Erstellung von Dumps mit "dumpMySQL clientSide". default: "{global}{modpath}/log/" auf dem FHEM-Server.