diff --git a/fhem/contrib/DS_Starter/93_DbRep.pm b/fhem/contrib/DS_Starter/93_DbRep.pm index c96f2484a..73a2814bd 100644 --- a/fhem/contrib/DS_Starter/93_DbRep.pm +++ b/fhem/contrib/DS_Starter/93_DbRep.pm @@ -1,5 +1,5 @@ ########################################################################################################## -# $Id: 93_DbRep.pm 27184 2023-02-10 19:47:19Z DS_Starter $ +# $Id: 93_DbRep.pm 27340 2023-03-19 07:45:02Z DS_Starter $ ########################################################################################################## # 93_DbRep.pm # @@ -59,6 +59,9 @@ no if $] >= 5.017011, warnings => 'experimental::smartmatch'; # Version History intern my %DbRep_vNotesIntern = ( + "8.52.2" => "28.03.2023 diffValue ", + "8.52.1" => "19.03.2023 fix Perl Warnings ", + "8.52.0" => "17.02.2023 get utf8mb4 info by connect db and set connection collation accordingly, new setter migrateCollation ", "8.51.6" => "11.02.2023 fix execute DbRep_afterproc after generating readings ". "Forum: https://forum.fhem.de/index.php/topic,53584.msg1262970.html#msg1262970 ". 
"fix MySQL 50mostFreqLogsLast2days ", @@ -407,9 +410,10 @@ my %dbrep_hmainf = ( deviceRename => { fn => "DbRep_changeDevRead", fndone => "DbRep_changeDone", fnabort => "DbRep_ParseAborted", pk => "RUNNING_PID", timeset => 0, dobp => 1, table => "history", renmode => "devren" }, readingRename => { fn => "DbRep_changeDevRead", fndone => "DbRep_changeDone", fnabort => "DbRep_ParseAborted", pk => "RUNNING_PID", timeset => 0, dobp => 1, table => "history", renmode => "readren" }, changeValue => { fn => "DbRep_changeVal", fndone => "DbRep_changeDone", fnabort => "DbRep_ParseAborted", pk => "RUNNING_PID", timeset => 1, dobp => 1, table => "history", renmode => "changeval" }, + migrateCollation => { fn => "DbRep_migCollation", fndone => "DbRep_migCollation_Done", fnabort => "DbRep_ParseAborted", pk => "RUNNING_PID", timeset => 0, dobp => 1 }, ); -my %dbrep_havgfn = ( # Schemafunktionen von averageValue +my %dbrep_havgfn = ( # Schemafunktionen von averageValue avgArithmeticMean => { fn => \&_DbRep_avgArithmeticMean }, avgDailyMeanGWS => { fn => \&_DbRep_avgDailyMeanGWS }, avgDailyMeanGWSwithGTS => { fn => \&_DbRep_avgDailyMeanGWS }, @@ -618,6 +622,15 @@ sub DbRep_Set { $hl .= ",___restore_sqlhistory___"; } + my $collation = join ',', qw ( utf8mb4_bin + utf8mb4_general_ci + utf8_bin + utf8_general_ci + latin1_bin + latin1_general_ci + latin1_general_cs + ); + my $specials = "50mostFreqLogsLast2days"; $specials .= ",allDevCount"; $specials .= ",allDevReadCount"; @@ -659,6 +672,7 @@ sub DbRep_Set { (($hash->{ROLE} ne "Agent") ? "averageValue:display,writeToDB,writeToDBSingle,writeToDBSingleStart,writeToDBInTime " : ""). (($hash->{ROLE} ne "Agent") ? "delSeqDoublets:adviceRemain,adviceDelete,delete " : ""). (($hash->{ROLE} ne "Agent" && $dbmodel =~ /MYSQL/) ? "dumpMySQL:clientSide,serverSide " : ""). + (($hash->{ROLE} ne "Agent" && $dbmodel =~ /MYSQL/) ? "migrateCollation:".$collation." " : ""). (($hash->{ROLE} ne "Agent" && $dbmodel =~ /SQLITE/) ? "dumpSQLite:noArg " : ""). 
(($hash->{ROLE} ne "Agent" && $dbmodel =~ /SQLITE/) ? "repairSQLite " : ""). (($hash->{ROLE} ne "Agent" && $dbmodel =~ /MYSQL/) ? "optimizeTables:showInfo,execute " : ""). @@ -851,6 +865,12 @@ sub DbRep_Set { return; } + if ($opt eq "migrateCollation" && $hash->{ROLE} ne "Agent") { + DbRep_setLastCmd (@a); + DbRep_Main ($hash, $opt, $prop); + return; + } + if ($opt eq "index") { DbRep_setLastCmd (@a); Log3 ($name, 3, "DbRep $name - ################################################################"); @@ -1054,7 +1074,7 @@ sub DbRep_Set { if($prop eq "50mostFreqLogsLast2days") { $sqlcmd = "select Device, reading, count(0) AS `countA` from history where TIMESTAMP > (NOW() - INTERVAL 2 DAY) group by DEVICE, READING order by countA desc, DEVICE limit 50;" if($dbmodel =~ /MYSQL/); - $sqlcmd = "select Device, reading, count(0) AS `countA` from history where TIMESTAMP > ('now' - '2 days') group by DEVICE, READING order by countA desc, DEVICE limit 50;" if($dbmodel =~ /SQLITE/); + $sqlcmd = "select Device, reading, count(0) AS `countA` from history where TIMESTAMP > datetime('now' ,'-2 days') group by DEVICE, READING order by countA desc, DEVICE limit 50;" if($dbmodel =~ /SQLITE/); $sqlcmd = "select Device, reading, count(0) AS countA from history where TIMESTAMP > (NOW() - INTERVAL '2' DAY) group by DEVICE, READING order by countA desc, DEVICE limit 50;" if($dbmodel =~ /POSTGRESQL/); } elsif ($prop eq "allDevReadCount") { @@ -1360,7 +1380,7 @@ sub DbRep_Get { @cmd = split /\s+/, $sqlcmd; $sqlcmd = join ' ', @cmd; - + DbRep_setLastCmd ($name, $opt, $sqlcmd); if ($sqlcmd =~ m/^\s*delete/is && !AttrVal($name, "allowDeletion", undef)) { @@ -1773,16 +1793,27 @@ sub DbRep_Attr { delete($attr{$name}{timeOlderThan}); delete($attr{$name}{timeYearPeriod}); } - if ($aName =~ /ftpTimeout|timeout|diffAccept/) { + + if ($aName =~ /ftpTimeout|timeout/) { unless ($aVal =~ /^[0-9]+$/) { return " The Value of $aName is not valid. 
Use only figures 0-9 without decimal places !"; } } + + if ($aName =~ /diffAccept/) { + my ($sign, $daval) = DbRep_ExplodeDiffAcc ($aVal); + + if (!$daval) { + return " The Value of $aName is not valid. Use only figures 0-9 without decimal places !"; + } + } + if ($aName eq "readingNameMap") { unless ($aVal =~ m/^[A-Za-z\d_\.-]+$/) { return " Unsupported character in $aName found. Use only A-Z a-z _ . -"; } } + if ($aName eq "timeDiffToNow") { unless ($aVal =~ /^[0-9]+$/ || $aVal =~ /^\s*[ydhms]:([\d]+)\s*/ && $aVal !~ /.*,.*/ ) { return "The Value of \"$aName\" isn't valid. Set simple seconds like \"86400\" or use form like \"y:1 d:10 h:6 m:12 s:20\". Refer to commandref !"; @@ -1791,6 +1822,7 @@ sub DbRep_Attr { delete($attr{$name}{timestamp_end}); delete($attr{$name}{timeYearPeriod}); } + if ($aName eq "timeOlderThan") { unless ($aVal =~ /^[0-9]+$/ || $aVal =~ /^\s*[ydhms]:([\d]+)\s*/ && $aVal !~ /.*,.*/ ) { return "The Value of \"$aName\" isn't valid. Set simple seconds like \"86400\" or use form like \"y:1 d:10 h:6 m:12 s:20\". Refer to commandref !"; @@ -1799,6 +1831,7 @@ sub DbRep_Attr { delete($attr{$name}{timestamp_end}); delete($attr{$name}{timeYearPeriod}); } + if ($aName eq "dumpMemlimit" || $aName eq "dumpSpeed") { unless ($aVal =~ /^[0-9]+$/) { return "The Value of $aName is not valid. 
Use only figures 0-9 without decimal places."; @@ -1817,9 +1850,11 @@ sub DbRep_Attr { } } } + if ($aName eq "ftpUse") { delete($attr{$name}{ftpUseSSL}); } + if ($aName eq "ftpUseSSL") { delete($attr{$name}{ftpUse}); } @@ -2141,12 +2176,12 @@ sub DbRep_getInitData { $rt = $rt.",".$brt; $opt = DbRep_trim ($opt) if($opt); - + if($prop) { $prop = DbRep_trim ($prop); $prop = encode_base64 ($prop, ""); } - + $err = q{}; return "$name|$err|$mints|$rt|$opt|$prop|$fret|$idxstate|$grants|$enc|$encc"; @@ -2516,7 +2551,7 @@ sub DbRep_Main { my ($epoch_seconds_begin,$epoch_seconds_end,$runtime_string_first,$runtime_string_next); - if($dbrep_hmainf{$opt} && defined &{$dbrep_hmainf{$opt}{fn}}) { + if($dbrep_hmainf{$opt} && exists &{$dbrep_hmainf{$opt}{fn}}) { $params = { hash => $hash, name => $name, @@ -3283,14 +3318,14 @@ sub DbRep_averval { my ($IsTimeSet,$IsAggrSet) = DbRep_checktimeaggr($hash); # ist Zeiteingrenzung und/oder Aggregation gesetzt ? (wenn ja -> "?" in SQL sonst undef) my @ts = split "\\|", $ts; # Timestampstring to Array - + $paref->{qlf} = $qlf; $paref->{tsaref} = \@ts; $paref->{dbmodel} = $dbmodel; $paref->{IsTimeSet} = $IsTimeSet; $paref->{IsAggrSet} = $IsAggrSet; $paref->{dbh} = $dbh; - + Log3 ($name, 4, "DbRep $name - averageValue calculation sceme: ".$acf); Log3 ($name, 5, "DbRep $name - IsTimeSet: $IsTimeSet, IsAggrSet: $IsAggrSet"); Log3 ($name, 5, "DbRep $name - Timestamp-Array: \n@ts"); @@ -3314,7 +3349,7 @@ sub DbRep_averval { } no warnings 'uninitialized'; - + $arrstr = encode_base64($arrstr, ""); # Daten müssen als Einzeiler zurückgegeben werden $device = encode_base64($device, ""); $gtsstr = encode_base64($gtsstr, ""); @@ -3331,7 +3366,7 @@ return "$name|$err|$arrstr|$device|$reading|$rt|$irowdone|$gtsstr|$gtsreached"; #################################################################################################### sub _DbRep_avgArithmeticMean { my $paref = shift; - + my $hash = $paref->{hash}; my $name = $paref->{name}; my $table = 
$paref->{table}; @@ -3343,27 +3378,27 @@ sub _DbRep_avgArithmeticMean { my $dbh = $paref->{dbh}; my $IsTimeSet = $paref->{IsTimeSet}; my $IsAggrSet = $paref->{IsAggrSet}; - + my ($err, $sth, $sql, $arrstr, $wrstr); my (@rsf, @rsn); - + my $aval = (DbRep_checktimeaggr($hash))[2]; $qlf = 'avgam'; my $addon = q{}; my $selspec = 'AVG(VALUE)'; - + if ($dbmodel eq "POSTGRESQL") { $selspec = 'AVG(VALUE::numeric)'; } - + for my $row (@{$tsaref}) { my @ar = split "#", $row; my $runtime_string = $ar[0]; my $runtime_string_first = $ar[1]; my $runtime_string_next = $ar[2]; - + my $avg = '-'; - + if ($IsTimeSet || $IsAggrSet) { $sql = DbRep_createSelectSql($hash,$table,$selspec,$device,$reading,"'$runtime_string_first'","'$runtime_string_next'",$addon); } @@ -3376,9 +3411,9 @@ sub _DbRep_avgArithmeticMean { my @line = $sth->fetchrow_array(); $avg = $line[0] if($line[0]); - + Log3 ($name, 5, "DbRep $name - SQL result: $avg "); - + if($aval eq "hour") { @rsf = split /[ :]/, $runtime_string_first; @rsn = split /[ :]/, $runtime_string_next; @@ -3394,19 +3429,19 @@ sub _DbRep_avgArithmeticMean { @rsn = split " ", $runtime_string_next; $arrstr .= $runtime_string."#".$avg."#".$rsf[0]."|"; } - + next if($avg eq '-'); # Schreiben von '-' als Durchschnitt verhindern - + my @wsf = split " ", $runtime_string_first; my @wsn = split " ", $runtime_string_next; my $wsft = $wsf[1] ? '_'.$wsf[1] : q{}; my $wsnt = $wsn[1] ? 
'_'.$wsn[1] : q{}; - + $wrstr .= $runtime_string."#".$avg."#".$wsf[0].$wsft."#".$wsn[0].$wsnt."|"; # Kombi zum Rückschreiben in die DB } - + $sth->finish; - + return ($err, $arrstr, $wrstr, $qlf); } @@ -3423,7 +3458,7 @@ return ($err, $arrstr, $wrstr, $qlf); #################################################################################################### sub _DbRep_avgDailyMeanGWS { my $paref = shift; - + my $hash = $paref->{hash}; my $name = $paref->{name}; my $table = $paref->{table}; @@ -3432,24 +3467,24 @@ sub _DbRep_avgDailyMeanGWS { my $qlf = $paref->{qlf}; my $tsaref = $paref->{tsaref}; my $dbh = $paref->{dbh}; - + my ($err, $sth, $arrstr, $wrstr, $gtsreached); my (@rsf, @rsn); - + my ($gts,$gtsstr) = (0, q{}); # Variablen für Grünlandtemperatursumme GTS - + my $aval = (DbRep_checktimeaggr($hash))[2]; my $acf = AttrVal ($name, 'averageCalcForm', 'avgArithmeticMean'); # Festlegung Berechnungsschema f. Mittelwert my $addon = "ORDER BY TIMESTAMP DESC LIMIT 1"; my $selspec = "VALUE"; $qlf = "avgdmgws"; - + for my $row (@{$tsaref}) { my @ar = split "#", $row; my $runtime_string = $ar[0]; my $runtime_string_first = $ar[1]; my $runtime_string_next = $ar[2]; - + my $sum = 0; my $anz = 0; # Anzahl der Messwerte am Tag my ($t01,$t07,$t13,$t19); # Temperaturen der Haupttermine @@ -3508,12 +3543,14 @@ sub _DbRep_avgDailyMeanGWS { my @wsf = split " ", $runtime_string_first; my @wsn = split " ", $runtime_string_next; + my $wsft = $wsf[1] ? '_'.$wsf[1] : q{}; + my $wsnt = $wsn[1] ? '_'.$wsn[1] : q{}; - $wrstr .= $runtime_string."#".$sum."#".$wsf[0]."_".$wsf[1]."#".$wsn[0]."_".$wsn[1]."|"; # Kombi zum Rückschreiben in die DB + $wrstr .= $runtime_string."#".$sum."#".$wsf[0].$wsft."#".$wsn[0].$wsnt."|"; # Kombi zum Rückschreiben in die DB ### Grünlandtemperatursumme lt. 
https://de.wikipedia.org/wiki/Gr%C3%BCnlandtemperatursumme ### my ($y,$m,$d) = split "-", $runtime_string; - + if ($acf eq 'avgDailyMeanGWSwithGTS' && looks_like_number($sum)) { $m = DbRep_removeLeadingZero ($m); $d = DbRep_removeLeadingZero ($d); @@ -3521,7 +3558,7 @@ sub _DbRep_avgDailyMeanGWS { my $f = $sum <= 0 ? 0 : $m >= 3 ? 1.00 : # Faktorenberechnung lt. https://de.wikipedia.org/wiki/Gr%C3%BCnlandtemperatursumme - $m == 2 ? 0.75 : + $m == 2 ? 0.75 : 0.5; $gts += $sum*$f; @@ -3533,16 +3570,16 @@ sub _DbRep_avgDailyMeanGWS { $gtsstr .= $runtime_string."#".$gts."#".$rsf[0]."|"; } } - + $sth->finish; - + return ($err, $arrstr, $wrstr, $qlf, $gtsstr, $gtsreached); } #################################################################################################### # averageValue Typ avgTimeWeightMean # zeitgewichteter Mittelwert -# +# # http://massmatics.de/merkzettel/#!837:Gewichteter_Mittelwert # # $tsum = timestamp letzter Messpunkt - timestamp erster Messpunkt @@ -3556,7 +3593,7 @@ return ($err, $arrstr, $wrstr, $qlf, $gtsstr, $gtsreached); #################################################################################################### sub _DbRep_avgTimeWeightMean { my $paref = shift; - + my $hash = $paref->{hash}; my $name = $paref->{name}; my $table = $paref->{table}; @@ -3567,27 +3604,27 @@ sub _DbRep_avgTimeWeightMean { my $dbh = $paref->{dbh}; my $IsTimeSet = $paref->{IsTimeSet}; my $IsAggrSet = $paref->{IsAggrSet}; - + my ($err, $sth, $sql, $arrstr, $wrstr, $bin_end, $val1); my (@rsf, @rsn); - - my $aval = (DbRep_checktimeaggr($hash))[2]; + + my $aval = (DbRep_checktimeaggr($hash))[2]; $qlf = 'avgtwm'; my $selspec = 'TIMESTAMP,VALUE'; my $addon = 'ORDER BY TIMESTAMP ASC'; my $addonl = 'ORDER BY TIMESTAMP DESC LIMIT 1'; - + for my $row (@{$tsaref}) { my @ar = split "#", $row; my $runtime_string = $ar[0]; my $runtime_string_first = $ar[1]; - my $runtime_string_next = $ar[2]; + my $runtime_string_next = $ar[2]; + + my ($tf,$tl,$tn,$to,$dt,$val); - my 
($tf,$tl,$tn,$to,$dt,$val); - if ($bin_end) { # das $bin_end des letzten Bin ist der effektive Zeitpunkt des letzten Datenwertes $tf = $bin_end; # der vorherigen Periode, die in die aktuelle Periode übernommen wird - } + } else { # dies ist der erste Mittelungsplatz, und mit einem "Peek-back-in-time" wird versucht, den Wert unmittelbar vor der Startzeit zu ermitteln my ($year,$month,$day,$hour,$min,$sec) = $runtime_string_first =~ m/(\d+)-(\d+)-(\d+)\s(\d+):(\d+):(\d+)/xs; my $time = timelocal ($sec,$min,$hour,$day,$month-1,$year); @@ -3599,31 +3636,31 @@ sub _DbRep_avgTimeWeightMean { $time -= 24 * 3600; # um 1 Tag zurückblicken } elsif ($aval eq 'week') { - $time -= 7 * 24 * 3600; # um 1 Woche zurückblicken + $time -= 7 * 24 * 3600; # um 1 Woche zurückblicken } else { $time -= 30 * 24 * 3600; # um 1 Monat zurückblicken - }; - - my $newtime_string = strftime ("%Y-%m-%d %H:%M:%S", localtime ($time)); + }; + + my $newtime_string = strftime ("%Y-%m-%d %H:%M:%S", localtime ($time)); $sql = DbRep_createSelectSql($hash, $table, $selspec, $device, $reading, "'$newtime_string'", "'$runtime_string_first'", $addonl); - + ($err, $sth) = DbRep_prepareExecuteQuery ($name, $dbh, $sql); return $err if ($err); - + my @twm_array = map { $_->[0]."_ESC_".$_->[1] } @{$sth->fetchall_arrayref()}; - + for my $twmrow (@twm_array) { ($tn,$val1) = split "_ESC_", $twmrow; - $val1 = DbRep_numval ($val1); # nichtnumerische Zeichen eliminieren - $bin_end = $runtime_string_first; # der letzte Wert vor dem vollständigen Zeitrahmen wird auf den Beginn des Zeitrahmens "gefälscht" + $val1 = DbRep_numval ($val1); # nichtnumerische Zeichen eliminieren + $bin_end = $runtime_string_first; # der letzte Wert vor dem vollständigen Zeitrahmen wird auf den Beginn des Zeitrahmens "gefälscht" $tf = $runtime_string_first; }; - } + } my $tsum = 0; my $sum = 0; - + if ($IsTimeSet || $IsAggrSet) { $sql = DbRep_createSelectSql($hash, $table, $selspec, $device, $reading, "'$runtime_string_first'", 
"'$runtime_string_next'", $addon); } @@ -3635,8 +3672,8 @@ sub _DbRep_avgTimeWeightMean { return $err if ($err); my @twm_array = map { $_->[0]."_ESC_".$_->[1] } @{$sth->fetchall_arrayref()}; - - if ($bin_end) { # der letzte Datenwert aus dem vorherigen Bin wird dem aktuellen Bin vorangestellt, + + if ($bin_end && $val1) { # der letzte Datenwert aus dem vorherigen Bin wird dem aktuellen Bin vorangestellt, unshift @twm_array, $bin_end.'_ESC_'.$val1; # wobei das vorherige $bin_end als Zeitstempel verwendet wird } @@ -3646,7 +3683,7 @@ sub _DbRep_avgTimeWeightMean { $bin_end .='-01' if (!$mmf); $bin_end .='-01' if (!$ddf); $bin_end .=' 00' if (!$hhf); - $bin_end .=':00' if (!$minf); + $bin_end .=':00' if (!$minf); $bin_end .=':00' if (!$secf); for my $twmrow (@twm_array) { @@ -3664,22 +3701,22 @@ sub _DbRep_avgTimeWeightMean { $dt = $tn - $to; $tsum += $dt; # Bildung der Zeitsumme für die spätere Division - - $sum += $val1 * $dt if ($val1); # die Division durch die Gesamtzeit wird am Ende, außerhalb der Schleife durchgeführt + + $sum += $val1 * $dt if ($val1); # die Division durch die Gesamtzeit wird am Ende, außerhalb der Schleife durchgeführt $val1 = $val; $to = $tn; Log3 ($name, 5, "DbRep $name - data element: $twmrow"); Log3 ($name, 5, "DbRep $name - time sum: $tsum, delta time: $dt, value: $val1, twm: ".($tsum ? 
$val1*($dt/$tsum) : 0)); } - - $dt = timelocal($secf, $minf, $hhf, $ddf, $mmf-1, $yyyyf-1900) - $to; # die Zeitspanne des letzten Datenwertes in diesem Bin wird für diesen Bin berücksichtigt - # $dt ist das Zeitgewicht des letzten Wertes in diesem Bin + + $dt = timelocal($secf, $minf, $hhf, $ddf, $mmf-1, $yyyyf-1900); # die Zeitspanne des letzten Datenwertes in diesem Bin wird für diesen Bin berücksichtigt + $dt -= $to if ($to); # $dt ist das Zeitgewicht des letzten Wertes in diesem Bin $tsum += $dt; $sum += $val1 * $dt if ($val1); $sum /= $tsum if ($tsum > 0); - $sum = "insufficient values" if ($sum == 0); - + $sum = "insufficient values" if ($sum == 0); + if($aval eq "hour") { @rsf = split /[ :]/,$runtime_string_first; @rsn = split /[ :]/,$runtime_string_next; @@ -3695,15 +3732,15 @@ sub _DbRep_avgTimeWeightMean { @rsn = split " ",$runtime_string_next; $arrstr .= $runtime_string."#".$sum."#".$rsf[0]."|"; } - + $runtime_string_first =~ s/\s/_/xs; $runtime_string_next =~ s/\s/_/xs; - + $wrstr .= $runtime_string."#".$sum."#".$runtime_string_first."#".$runtime_string_next."|"; # Kombi zum Rückschreiben in die DB } - + $sth->finish; - + return ($err, $arrstr, $wrstr, $qlf); } @@ -3735,9 +3772,9 @@ sub DbRep_avervalDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. 
state - + return; } @@ -3780,7 +3817,7 @@ sub DbRep_avervalDone { ReadingsBulkUpdateValue ($hash, "reachedGTSthreshold", $gtsreached) if($gtsreached); my @arr = split "\\|", $arrstr; - + for my $row (@arr) { my @a = split "#", $row; my $runtime_string = $a[0]; @@ -3808,9 +3845,9 @@ sub DbRep_avervalDone { ReadingsBulkUpdateValue ($hash, "db_lines_processed", $irowdone) if($hash->{LASTCMD} =~ /writeToDB/); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. state - + return; } @@ -3938,9 +3975,9 @@ sub DbRep_countDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen - + return; } @@ -3983,7 +4020,7 @@ sub DbRep_countDone { ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. state return; @@ -4175,9 +4212,9 @@ sub DbRep_maxvalDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. state - + return; } @@ -4218,7 +4255,7 @@ sub DbRep_maxvalDone { ReadingsBulkUpdateValue ($hash, "db_lines_processed", $irowdone) if($hash->{LASTCMD} =~ /writeToDB|deleteOther/); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. 
state return; @@ -4404,9 +4441,9 @@ sub DbRep_minvalDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen - + return; } @@ -4449,7 +4486,7 @@ sub DbRep_minvalDone { ReadingsBulkUpdateValue ($hash, "db_lines_processed", $irowdone) if($hash->{LASTCMD} =~ /writeToDB|deleteOther/); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. state return; @@ -4491,10 +4528,13 @@ sub DbRep_diffval { $selspec = "TIMESTAMP,VALUE"; } - my $st = [gettimeofday]; # SQL-Startzeit - my @row_array; my @array; + + my $difflimit = AttrVal ($name, 'diffAccept', 20); # legt fest, bis zu welchem Wert Differenzen akzeptiert werden (Ausreißer eliminieren) + my ($sign, $dlim) = DbRep_ExplodeDiffAcc ($difflimit); # $sign -> Vorzeichen (+-) + + my $st = [gettimeofday]; # SQL-Startzeit for my $row (@ts) { # DB-Abfrage zeilenweise für jeden Array-Eintrag my @a = split("#", $row); @@ -4509,10 +4549,10 @@ sub DbRep_diffval { } if ($IsTimeSet || $IsAggrSet) { - $sql = DbRep_createSelectSql($hash,$table,$selspec,$device,$reading,"'$runtime_string_first'","'$runtime_string_next'",'ORDER BY TIMESTAMP'); + $sql = DbRep_createSelectSql($hash, $table, $selspec, $device , $reading, "'$runtime_string_first'", "'$runtime_string_next'", 'ORDER BY TIMESTAMP'); } else { - $sql = DbRep_createSelectSql($hash,$table,$selspec,$device,$reading,undef,undef,'ORDER BY TIMESTAMP'); + $sql = DbRep_createSelectSql($hash, $table, $selspec, $device, $reading, undef, undef, 'ORDER BY TIMESTAMP'); } ($err, $sth) = DbRep_prepareExecuteQuery ($name, $dbh, $sql); @@ -4528,7 +4568,7 @@ sub DbRep_diffval { my @sp; my $dse = 0; my $vold; - my @sqlite_array; + my @db_array; for my $row (@array) { @sp = split("[ \t][ \t]*", $row, 4); @@ -4536,20 +4576,34 @@ sub DbRep_diffval 
{ my $timestamp = $sp[2] ? $sp[1]." ".$sp[2] : $sp[1]; my $vnew = $sp[3]; $vnew =~ tr/\n//d; - + if (!DbRep_IsNumeric ($vnew)) { # Test auf $value = "numeric" Log3 ($name, 3, "DbRep $name - WARNING - dataset has no numeric value >$vnew< and is ignored\ntimestamp >$timestamp<, device >$device<, reading >$reading<"); next; } - $dse = $vold && (($vnew-$vold) > 0) ? ($vnew-$vold) : 0; + #$dse = $vold && (($vnew-$vold) > 0) ? ($vnew-$vold) : 0; + #@sp = $runtime_string." ".$timestamp." ".$vnew." ".$dse."\n"; + #$vold = $vnew; + + if (!defined $vold) { + $vold = $vnew; + } + + if ($sign =~ /\+-/xs) { # sowohl positive als auch negative Abweichung auswerten + $dse = $vnew - $vold; + } + else { + $dse = ($vnew - $vold) > 0 ? ($vnew - $vold) : 0; # nur positive Abweichung auswerten + } + @sp = $runtime_string." ".$timestamp." ".$vnew." ".$dse."\n"; $vold = $vnew; - push @sqlite_array, @sp; + push @db_array, @sp; } - @array = @sqlite_array; + @array = @db_array; } } @@ -4578,8 +4632,6 @@ sub DbRep_diffval { $sth->finish; $dbh->disconnect; - my $difflimit = AttrVal($name, "diffAccept", "20"); # legt fest, bis zu welchem Wert Differenzen akzeptiert werden (Ausreißer eliminieren) - # Berechnung diffValue aus Selektionshash my %rh = (); # Ergebnishash, wird alle Ergebniszeilen enthalten my %ch = (); # counthash, enthält die Anzahl der verarbeiteten Datasets pro runtime_string @@ -4604,9 +4656,9 @@ sub DbRep_diffval { my $value = $a[3] ? $a[3] : 0; my $diff = $a[4] ? $a[4] : 0; - $timestamp =~ s/\s+$//g; # Leerzeichen am Ende $timestamp entfernen + $timestamp =~ s/\s+$//g; # Leerzeichen am Ende $timestamp entfernen - if (!DbRep_IsNumeric ($value)) { # Test auf $value = "numeric" + if (!DbRep_IsNumeric ($value)) { # Test auf $value = "numeric" $a[3] =~ s/\s+$//g; Log3 ($name, 2, "DbRep $name - ERROR - value isn't numeric in diffValue function. 
Faulty dataset was \nTIMESTAMP: $timestamp, DEVICE: $device, READING: $reading, VALUE: $value."); $err = encode_base64("Value isn't numeric. Faulty dataset was - TIMESTAMP: $timestamp, VALUE: $value", ""); @@ -4615,45 +4667,47 @@ sub DbRep_diffval { Log3 ($name, 5, "DbRep $name - Runtimestring: $runtime_string, DEVICE: $device, READING: $reading, TIMESTAMP: $timestamp, VALUE: $value, DIFF: $diff"); - $diff_current = $timestamp." ".$diff; # String ignorierter Zeilen erzeugen + $diff_current = $timestamp." ".$diff; # String ignorierter Zeilen erzeugen - if($diff > $difflimit) { + if(abs $diff > $dlim) { $rejectstr .= $diff_before." -> ".$diff_current."\n"; } $diff_before = $diff_current; - if ($runtime_string eq $lastruntimestring) { # Ergebnishash erzeugen + if ($runtime_string eq $lastruntimestring) { # Ergebnishash erzeugen if ($i == 1) { - $diff_total = $diff ? $diff : 0 if($diff <= $difflimit); + $diff_total = $diff ? $diff : 0 if(abs $diff <= $dlim); $rh{$runtime_string} = $runtime_string."|".$diff_total."|".$timestamp; $ch{$runtime_string} = 1 if($value); - $lval = $value; + $lval = $value ? $value : 0; $rslval = $runtime_string; } + $ch{$runtime_string}++ if(defined $a[3] && $i > 1); + if ($diff) { - if($diff <= $difflimit) { - $diff_total = $diff_total+$diff; + if(abs $diff <= $dlim) { + $diff_total = $diff_total + $diff; } $rh{$runtime_string} = $runtime_string."|".$diff_total."|".$timestamp; - $ch{$runtime_string}++ if($value && $i > 1); + #$ch{$runtime_string}++ if($value && $i > 1); $lval = $value; $rslval = $runtime_string; } } - else { # neuer Zeitabschnitt beginnt, ersten Value-Wert erfassen und Übertragsdifferenz bilden + else { # neuer Zeitabschnitt beginnt, ersten Value-Wert erfassen und Übertragsdifferenz bilden $lastruntimestring = $runtime_string; $i = 1; $uediff = $value - $lval if($value > $lval); $diff = $uediff; - $lval = $value if($value); # Übetrag über Perioden mit value = 0 hinweg ! 
+ $lval = $value if($value); # Übetrag über Perioden mit value = 0 hinweg ! $rslval = $runtime_string; Log3 ($name, 5, "DbRep $name - balance difference of $uediff between $rslval and $runtime_string"); - $diff_total = $diff ? $diff : 0 if($diff <= $difflimit); + $diff_total = $diff ? $diff : 0 if(abs $diff <= $dlim); $rh{$runtime_string} = $runtime_string."|".$diff_total."|".$timestamp; $ch{$runtime_string} = 1 if($value); $uediff = 0; @@ -4684,7 +4738,7 @@ sub DbRep_diffval { } # Ergebnishash als Einzeiler zurückgeben - # ignorierte Zeilen ($diff > $difflimit) + # ignorierte Zeilen (abs $diff > $dlim) my $rowsrej; $rowsrej = encode_base64 ($rejectstr, "") if($rejectstr); @@ -4720,15 +4774,15 @@ sub DbRep_diffvalDone { my $device = $a[3] ? decode_base64($a[3]) : ''; my $reading = $a[4]; my $bt = $a[5]; - my $rowsrej = $a[6] ? decode_base64($a[6]) : ''; # String von Datensätzen die nicht berücksichtigt wurden (diff Schwellenwert Überschreitung) - my $ncpslist = $a[6] ? decode_base64($a[7]) : ''; # Hash von Perioden die nicht kalkuliert werden konnten "no calc in period" + my $rowsrej = $a[6] ? decode_base64($a[6]) : ''; # String von Datensätzen die nicht berücksichtigt wurden (diff Schwellenwert Überschreitung) + my $ncpslist = $a[6] ? 
decode_base64($a[7]) : ''; # Hash von Perioden die nicht kalkuliert werden konnten "no calc in period" my $irowdone = $a[8]; - my $ndp = AttrVal($name, "numDecimalPlaces", $dbrep_defdecplaces); - my $difflimit = AttrVal($name, "diffAccept", "20"); # legt fest, bis zu welchem Wert Differenzen akzeptoert werden (Ausreißer eliminieren)AttrVal($name, "diffAccept", "20"); - - my $hash = $defs{$name}; - + my $hash = $defs{$name}; + my $ndp = AttrVal ($name, "numDecimalPlaces", $dbrep_defdecplaces); + my $difflimit = AttrVal ($name, 'diffAccept', 20); # legt fest, bis zu welchem Wert Differenzen akzeptiert werden (Ausreißer eliminieren) + my ($sign, $dlim) = DbRep_ExplodeDiffAcc ($difflimit); + my $reading_runtime_string; Log3 ($name, 5, qq{DbRep $name - BlockingCall PID "$hash->{HELPER}{RUNNING_PID}{pid}" finished}); @@ -4738,9 +4792,9 @@ sub DbRep_diffvalDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen - + return; } @@ -4795,13 +4849,13 @@ sub DbRep_diffvalDone { } ReadingsBulkUpdateValue ($hash, "db_lines_processed", $irowdone) if($hash->{LASTCMD} =~ /writeToDB/); - ReadingsBulkUpdateValue ($hash, "diff_overrun_limit_".$difflimit, $rowsrej) if($rowsrej); + ReadingsBulkUpdateValue ($hash, "diff_overrun_limit_".$dlim, $rowsrej) if($rowsrej); ReadingsBulkUpdateValue ($hash, "less_data_in_period", $ncpstr) if($ncpstr); - ReadingsBulkUpdateValue ($hash, "state", qq{WARNING - see readings 'less_data_in_period' or 'diff_overrun_limit_XX'}) + ReadingsBulkUpdateValue ($hash, "state", qq{WARNING - see readings 'less_data_in_period' or 'diff_overrun_limit_XX'}) if($ncpstr||$rowsrej); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. 
state return; @@ -4933,9 +4987,9 @@ sub DbRep_sumvalDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen - + return; } @@ -4973,7 +5027,7 @@ sub DbRep_sumvalDone { ReadingsBulkUpdateValue ($hash, "db_lines_processed", $irowdone) if($hash->{LASTCMD} =~ /writeToDB/); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. state return; @@ -5055,9 +5109,9 @@ sub DbRep_del_Done { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen - + return; } @@ -5087,7 +5141,7 @@ sub DbRep_del_Done { ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. state return; @@ -5200,9 +5254,9 @@ sub DbRep_insertDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "insert"); # Befehl nach Procedure ausführen - + return; } @@ -5217,7 +5271,7 @@ sub DbRep_insertDone { ReadingsBulkUpdateValue ($hash, "data_inserted", $i_timestamp.", ".$i_device.", ".$i_type.", ".$i_event.", ".$i_reading.", ".$i_value.", ".$i_unit); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "insert"); # Befehl nach Procedure ausführen incl. 
state return; @@ -5337,9 +5391,9 @@ sub DbRep_currentfillupDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen - + return; } @@ -5360,7 +5414,7 @@ sub DbRep_currentfillupDone { ReadingsBulkUpdateValue ($hash, "number_lines_inserted", $rowstr); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. state Log3 ($name, 3, "DbRep $name - Table '$hash->{DATABASE}'.'current' filled up with rows: $rowstr"); @@ -5655,9 +5709,9 @@ sub DbRep_changeDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $renmode); # Befehl nach Procedure ausführen - + return; } @@ -5688,7 +5742,7 @@ sub DbRep_changeDone { ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $renmode); # Befehl nach Procedure ausführen incl. state if ($urow != 0) { @@ -5798,9 +5852,9 @@ sub DbRep_fetchrowsDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen - + return; } @@ -5907,9 +5961,9 @@ sub DbRep_fetchrowsDone { ReadingsBulkUpdateValue ($hash, "state", "done - Warning: present rows exceed specified limit, adjust attribute limit") if($nrows-$limit>0); readingsEndUpdate($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. 
state - + return; } @@ -6332,9 +6386,9 @@ sub DbRep_deldoubl_Done { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen - + return; } @@ -6384,7 +6438,7 @@ sub DbRep_deldoubl_Done { ReadingsBulkUpdateValue ($hash, 'state', "done - Warning: not all items are shown, adjust attribute limit if you want see more") if($l >= $limit); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. state return; @@ -6550,9 +6604,9 @@ sub DbRep_expfile_Done { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "export"); # Befehl nach Procedure ausführen - + return; } @@ -6571,7 +6625,7 @@ sub DbRep_expfile_Done { ReadingsBulkUpdateValue ($hash, $export_string, $nrows); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "export"); # Befehl nach Procedure ausführen incl. state return; @@ -6765,9 +6819,9 @@ sub DbRep_impfile_Done { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "import"); # Befehl nach Procedure ausführen - + return; } @@ -6781,7 +6835,7 @@ sub DbRep_impfile_Done { ReadingsBulkUpdateValue ($hash, $import_string, $irowdone); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "import"); # Befehl nach Procedure ausführen incl. state Log3 ($name, 3, "DbRep $name - Number of imported datasets to $hash->{DATABASE} from file $infile: $irowdone"); @@ -6993,7 +7047,7 @@ sub DbRep_sqlCmdBlocking { $err = $failed eq "Timeout\n" ? 
$totxt : $failed; Log3 ($name, 2, "DbRep $name - $err"); - + my $encerr = encode_base64($err, ""); $sth->finish if($sth); @@ -7034,12 +7088,12 @@ sub DbRep_sqlCmdBlocking { Log3 ($name, 4, "DbRep $name - Number of entries processed in db $hash->{DATABASE}: $nrows"); readingsBeginUpdate ($hash); - + if (defined $data{DbRep}{$name}{sqlcache}{temp}) { # SQL incl. Formatierung aus Zwischenspeicherzwischenspeichern my $tmpsql = delete $data{DbRep}{$name}{sqlcache}{temp}; ReadingsBulkUpdateValue ($hash, 'sqlCmd', $tmpsql); } - + ReadingsBulkUpdateTimeState ($hash, undef, $rt, 'done'); readingsEndUpdate ($hash, 1); @@ -7180,12 +7234,12 @@ sub _DbRep_sqlBlckgErrorState { Log3 ($name, 2, "DbRep $name - ERROR - $err"); readingsBeginUpdate ($hash); - + if (defined $data{DbRep}{$name}{sqlcache}{temp}) { # SQL incl. Formatierung aus Zwischenspeicherzwischenspeichern my $tmpsql = delete $data{DbRep}{$name}{sqlcache}{temp}; ReadingsBulkUpdateValue ($hash, 'sqlCmd', $tmpsql); } - + ReadingsBulkUpdateValue ($hash, 'errortext', $err); ReadingsBulkUpdateValue ($hash, 'state', 'error'); readingsEndUpdate ($hash, 1); @@ -7264,21 +7318,21 @@ sub DbRep_sqlCmdDone { Log3 ($name, 5, qq{DbRep $name - BlockingCall PID "$hash->{HELPER}{RUNNING_PID}{pid}" finished}); delete($hash->{HELPER}{RUNNING_PID}); - + my $tmpsql = $data{DbRep}{$name}{sqlcache}{temp}; # SQL incl. 
Formatierung aus Zwischenspeicher holen if ($err) { readingsBeginUpdate ($hash); - ReadingsBulkUpdateValue ($hash, 'sqlCmd', $tmpsql); + ReadingsBulkUpdateValue ($hash, 'sqlCmd', $tmpsql); ReadingsBulkUpdateValue ($hash, "errortext", $err ); ReadingsBulkUpdateValue ($hash, "state", "error"); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen - + return; } - + DbRep_addSQLcmdCache ($name); # Drop-Down Liste bisherige sqlCmd-Befehle füllen und in Key-File sichern my ($rt,$brt) = split ",", $bt; @@ -7290,7 +7344,7 @@ sub DbRep_sqlCmdDone { no warnings 'uninitialized'; readingsBeginUpdate ($hash); - ReadingsBulkUpdateValue ($hash, 'sqlCmd', $tmpsql); + ReadingsBulkUpdateValue ($hash, 'sqlCmd', $tmpsql); ReadingsBulkUpdateValue ($hash, 'sqlResultNumRows', $nrows); if ($srf eq "sline") { @@ -7302,7 +7356,7 @@ sub DbRep_sqlCmdDone { my $res = ""; my @rows = split( /§/, $rowstring ); my $row; - + for $row ( @rows ) { $row =~ s/\|°escaped°\|/§/g; $row =~ s/$srs/\|/g if($srs !~ /\|/); @@ -7317,7 +7371,7 @@ sub DbRep_sqlCmdDone { my $res = ""; my @rows = split( /§/, $rowstring ); my $row; - + for $row ( @rows ) { $row =~ s/\|°escaped°\|/§/g; $res .= $row."
"; @@ -7332,7 +7386,7 @@ sub DbRep_sqlCmdDone { my $numd = ceil(log10($bigint)); my $formatstr = sprintf('%%%d.%dd', $numd, $numd); my $i = 0; - + for my $row ( @rows ) { $i++; $row =~ s/\|°escaped°\|/§/g; @@ -7347,7 +7401,7 @@ sub DbRep_sqlCmdDone { my $numd = ceil(log10($bigint)); my $formatstr = sprintf('%%%d.%dd', $numd, $numd); my $i = 0; - + for my $row ( @rows ) { $i++; $row =~ s/\|°escaped°\|/§/g; @@ -7360,7 +7414,7 @@ sub DbRep_sqlCmdDone { ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. state return; @@ -7834,9 +7888,9 @@ sub DbRep_IndexDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "index"); # Befehl nach Procedure ausführen incl. state - + return; } @@ -7846,7 +7900,7 @@ sub DbRep_IndexDone { ReadingsBulkUpdateValue ($hash, "index_state", $ret); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "index"); # Befehl nach Procedure ausführen incl. 
state return; @@ -7858,27 +7912,27 @@ return; sub DbRep_IndexAborted { my $hash = shift; my $cause = shift // "Timeout: process terminated"; - + my $name = $hash->{NAME}; my $dbh = $hash->{DBH}; Log3 ($name, 1, "DbRep $name -> BlockingCall $hash->{HELPER}{RUNNING_INDEX}{fn} pid:$hash->{HELPER}{RUNNING_INDEX}{pid} $cause"); ReadingsSingleUpdateValue ($hash, 'state', 'Abort', 0); - + my $erread = DbRep_afterproc ($hash, "index"); # Befehl nach Procedure ausführen $erread = ", ".(split("but", $erread))[1] if($erread); my $state = $cause.$erread; $dbh->disconnect() if(defined($dbh)); - + ReadingsSingleUpdateValue ($hash, "state", $state, 1); Log3 ($name, 2, "DbRep $name - Database index operation aborted due to \"$cause\" "); delete($hash->{HELPER}{RUNNING_INDEX}); - + return; } @@ -7959,7 +8013,7 @@ sub DbRep_optimizeTables { $err = _DbRep_setSessPragma ($name, $dbh, \$query); return "$name|$err" if ($err); - + $query = "VACUUM"; ($err, $sth) = DbRep_prepareExecuteQuery ($name, $dbh, $query, "VACUUM database $dbname...."); @@ -8138,9 +8192,9 @@ sub DbRep_OptimizeDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "optimize"); # Befehl nach Procedure ausführen - + return; } @@ -8153,7 +8207,7 @@ sub DbRep_OptimizeDone { ReadingsBulkUpdateValue ($hash, "SizeDbEnd_MB", $db_MB_end ); ReadingsBulkUpdateTime ($hash, $brt, undef); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "optimize"); # Befehl nach Procedure ausführen incl. state Log3 ($name, 3, "DbRep $name - Optimize tables finished successfully. 
"); @@ -8833,9 +8887,9 @@ sub DbRep_DumpDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "dump"); # Befehl nach Procedure ausführen - + return; } @@ -8853,7 +8907,7 @@ sub DbRep_DumpDone { ReadingsBulkUpdateValue ($hash, "FTP_DumpFilesDeleted", $ffd) if($ffd); ReadingsBulkUpdateValue ($hash, "background_processing_time", sprintf("%.4f",$brt)); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "dump"); # Befehl nach Procedure ausführen incl. state Log3 ($name, 3, "DbRep $name - Database dump finished successfully. "); @@ -8958,9 +9012,9 @@ sub DbRep_RepairDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "repair"); # Befehl nach Procedure ausführen - + return; } @@ -8969,7 +9023,7 @@ sub DbRep_RepairDone { readingsBeginUpdate ($hash); ReadingsBulkUpdateValue ($hash, "background_processing_time", sprintf("%.4f",$brt)); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "repair"); # Befehl nach Procedure ausführen incl. state Log3 ($name, 3, "DbRep $name - Database repair $hash->{DATABASE} finished - total time used (hh:mm:ss): ".DbRep_sec2hms($brt)); @@ -9303,9 +9357,9 @@ sub DbRep_restoreDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "restore", $bfile); # Befehl nach Procedure ausführen - + return; } @@ -9316,7 +9370,7 @@ sub DbRep_restoreDone { ReadingsBulkUpdateValue ($hash, "RestoreRowsCurrent", $drc) if($drc); ReadingsBulkUpdateTime ($hash, $brt, undef); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "restore", $bfile); # Befehl nach Procedure ausführen incl. state Log3 ($name, 3, "DbRep $name - Database restore finished successfully. 
"); @@ -9453,9 +9507,9 @@ sub DbRep_syncStandbyDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "syncStandby"); # Befehl nach Procedure ausführen - + return; } @@ -9465,7 +9519,7 @@ sub DbRep_syncStandbyDone { ReadingsBulkUpdateValue ($hash, "number_lines_inserted_Standby", $irows); ReadingsBulkUpdateTime ($hash, $brt, $rt); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "syncStandby"); # Befehl nach Procedure ausführen incl. state return; @@ -9594,7 +9648,7 @@ sub DbRep_reduceLog { ($err, my $sth_del) = DbRep_prepareOnly ($name, $dbh, "DELETE FROM $table WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) AND (VALUE=?)"); return "$name|$err" if ($err); - + ($err, my $sth_delNull) = DbRep_prepareOnly ($name, $dbh, "DELETE FROM $table WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) AND VALUE IS NULL"); return "$name|$err" if ($err); @@ -9821,18 +9875,18 @@ sub _DbRep_rl_deleteDayRows { my $reading = $delRow->[3]; my $time = $delRow->[0]; my $value = $delRow->[4] // 'NULL'; - + if ($value eq 'NULL') { Log3 ($name, 5, "DbRep $name - DELETE FROM $table WHERE (DEVICE=$device) AND (READING=$reading) AND (TIMESTAMP=$time) AND VALUE IS $value"); - $sth_delNull->execute($device, $reading, $time); + $sth_delNull->execute($device, $reading, $time); } else { Log3 ($name, 5, "DbRep $name - DELETE FROM $table WHERE (DEVICE=$device) AND (READING=$reading) AND (TIMESTAMP=$time) AND (VALUE=$value)"); - $sth_del->execute($device, $reading, $time, $value); + $sth_del->execute($device, $reading, $time, $value); } - + $i++; my $params = { @@ -10412,9 +10466,9 @@ sub DbRep_reduceLogDone { if ($err) { ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); ReadingsSingleUpdateValue ($hash, "state", "error", 1); - + DbRep_afterproc ($hash, "reduceLog"); # Befehl nach Procedure ausführen - + return; } @@ -10422,19 +10476,198 @@ sub DbRep_reduceLogDone { 
ReadingsBulkUpdateValue ($hash, "background_processing_time", sprintf("%.2f", $brt)); ReadingsBulkUpdateValue ($hash, "reduceLogState", $ret); readingsEndUpdate ($hash, 1); - + DbRep_afterproc ($hash, "reduceLog"); # Befehl nach Procedure ausführen return; } +#################################################################################################### +# Migration DB / Tabellen Charset und Collation +#################################################################################################### +sub DbRep_migCollation { + my $paref = shift; + my $hash = $paref->{hash}; + my $name = $paref->{name}; + my $opt = $paref->{opt}; + my $collation = $paref->{prop}; + + my $db = $hash->{DATABASE}; + my $utf8 = $hash->{UTF8} // 0; + + my @se = (); + my ($sth, $table); + + my $bst = [gettimeofday]; # Background-Startzeit + + my $charset = (split '_', $collation, 2)[0]; + + my ($err,$dbh,$dbmodel) = DbRep_dbConnect($name); + return "$name|$err" if ($err); + + my $st = [gettimeofday]; # SQL-Startzeit + + # DB Migration + ############### + Log3 ($name, 3, "DbRep $name - migrate database >$db< collation to >$collation<, please be patient ..."); + + ($err, $sth) = DbRep_prepareExecuteQuery ($name, $dbh, qq(ALTER DATABASE $db CHARACTER SET = $charset COLLATE = $collation)); + return "$name|$err" if ($err); + + ($err, @se) = DbRep_prepareExec2Array ($name, $dbh, qq(SHOW VARIABLES LIKE 'collation_database')); + return "$name|$err" if ($err); + + my $dcs = @se ? 
$se[1] : 'no result'; + + Log3 ($name, 4, "DbRep $name - new Collation of database >$db< is >$dcs<"); + + # Tabelle history Migration + ############################# + ($err, my $hcs) = _DbRep_migCollTable ( {name => $name, + dbh => $dbh, + table => 'history', + db => $db, + charset => $charset, + collation => $collation + } + ); + return "$name|$err" if ($err); + + # Tabelle current Migration + ############################# + ($err, my $ccs) = _DbRep_migCollTable ( {name => $name, + dbh => $dbh, + table => 'current', + db => $db, + charset => $charset, + collation => $collation + } + ); + return "$name|$err" if ($err); + + Log3 ($name, 3, "DbRep $name - migration done"); + + $dbh->disconnect; + + my $rt = tv_interval($st); # SQL-Laufzeit ermitteln + my $brt = tv_interval($bst); # Background-Laufzeit ermitteln + $rt = $rt.",".$brt; + $err = q{}; + +return "$name|$err|$dcs|$ccs|$hcs|$rt|$opt"; +} + +#################################################################################################### +# Migration Tabellen Charset und Collation +#################################################################################################### +sub _DbRep_migCollTable { + my $paref = shift; + + my $name = $paref->{name}; + my $dbh = $paref->{dbh}; + my $table = $paref->{table}; + my $db = $paref->{db}; + my $charset = $paref->{charset}; + my $collation = $paref->{collation}; + + Log3 ($name, 3, "DbRep $name - migrate table >$table< collation to >$collation< ... be patient ..."); + + my ($err, $sth) = DbRep_prepareExecuteQuery ($name, $dbh, qq(ALTER TABLE $table CONVERT TO CHARACTER SET $charset COLLATE $collation)); + return $err if ($err); + + ($err, my @se) = DbRep_prepareExec2Array ($name, $dbh, qq(SELECT TABLE_COLLATION FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = "$table" and TABLE_SCHEMA = "$db")); + return $err if ($err); + + my $col = @se ? 
$se[0] : 'no result'; + + Log3 ($name, 4, "DbRep $name - new Collation of table >$table< is >$col<"); + +return ($err, $col); +} + +#################################################################################################### +# Auswertungsroutine Migration DB / Tabellen Charset und Collation +#################################################################################################### +sub DbRep_migCollation_Done { + my $string = shift; + + my @a = split("\\|",$string); + my $name = $a[0]; + my $err = $a[1] ? decode_base64($a[1]) : ''; + my $dcs = $a[2]; + my $ccs = $a[3]; + my $hcs = $a[4]; + my $bt = $a[5]; + my $opt = $a[6]; + + my $hash = $defs{$name}; + + Log3 ($name, 5, qq{DbRep $name - BlockingCall PID "$hash->{HELPER}{RUNNING_PID}{pid}" finished}); + + delete($hash->{HELPER}{RUNNING_PID}); + + if ($err) { + ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); + ReadingsSingleUpdateValue ($hash, "state", "error", 1); + + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen + + return; + } + + my ($rt,$brt) = split ",", $bt; + + readingsBeginUpdate ($hash); + + ReadingsBulkUpdateValue ($hash, 'collation_database', $dcs); + ReadingsBulkUpdateValue ($hash, 'collation_table_current', $ccs); + ReadingsBulkUpdateValue ($hash, 'collation_table_history', $hcs); + + ReadingsBulkUpdateTime ($hash, $brt, $rt); + readingsEndUpdate ($hash, 1); + + DbRep_afterproc ($hash, $hash->{LASTCMD}); # Befehl nach Procedure ausführen incl. 
state + +return; +} + +#################################################################################################### +# Abbruchroutine Timeout DB-Abfrage +#################################################################################################### +sub DbRep_ParseAborted { + my $hash = shift; + my $cause = shift // "Timeout: process terminated"; + + my $name = $hash->{NAME}; + my $dbh = $hash->{DBH}; + + Log3 ($name, 5, qq{DbRep $name - BlockingCall PID "$hash->{HELPER}{RUNNING_PID}{pid}" finished}); + Log3 ($name, 1, "DbRep $name -> BlockingCall $hash->{HELPER}{RUNNING_PID}{fn} pid:$hash->{HELPER}{RUNNING_PID}{pid} $cause"); + + delete($hash->{HELPER}{RUNNING_PID}); + + ReadingsSingleUpdateValue ($hash, 'state', 'Abort', 0); + + my $erread = DbRep_afterproc ($hash, "command"); # Befehl nach Procedure ausführen + $erread = ", ".(split("but", $erread))[1] if($erread); + + my $state = $cause.$erread; + + $dbh->disconnect() if(defined($dbh)); + ReadingsSingleUpdateValue ($hash, "state", $state, 1); + + Log3 ($name, 2, "DbRep $name - Database command aborted: \"$cause\" "); + +return; +} + #################################################################################################### # Abbruchroutine Timeout reduceLog #################################################################################################### sub DbRep_reduceLogAborted { my $hash = shift; my $cause = shift // "Timeout: process terminated"; - + my $name = $hash->{NAME}; my $dbh = $hash->{DBH}; @@ -10447,7 +10680,7 @@ sub DbRep_reduceLogAborted { my $state = $cause.$erread; $dbh->disconnect() if(defined($dbh)); - + ReadingsSingleUpdateValue ($hash, "state", $state, 1); Log3 ($name, 2, "DbRep $name - Database reduceLog aborted: \"$cause\" "); @@ -10464,7 +10697,7 @@ return; sub DbRep_restoreAborted { my $hash = shift; my $cause = shift // "Timeout: process terminated"; - + my $name = $hash->{NAME}; my $dbh = $hash->{DBH}; @@ -10487,43 +10720,13 @@ sub DbRep_restoreAborted { 
return; } -#################################################################################################### -# Abbruchroutine Timeout DB-Abfrage -#################################################################################################### -sub DbRep_ParseAborted { - my $hash = shift; - my $cause = shift // "Timeout: process terminated"; - - my $name = $hash->{NAME}; - my $dbh = $hash->{DBH}; - - Log3 ($name, 5, qq{DbRep $name - BlockingCall PID "$hash->{HELPER}{RUNNING_PID}{pid}" finished}); - Log3 ($name, 1, "DbRep $name -> BlockingCall $hash->{HELPER}{RUNNING_PID}{fn} pid:$hash->{HELPER}{RUNNING_PID}{pid} $cause"); - - delete($hash->{HELPER}{RUNNING_PID}); - - ReadingsSingleUpdateValue ($hash, 'state', 'Abort', 0); - - my $erread = DbRep_afterproc ($hash, "command"); # Befehl nach Procedure ausführen - $erread = ", ".(split("but", $erread))[1] if($erread); - - my $state = $cause.$erread; - - $dbh->disconnect() if(defined($dbh)); - ReadingsSingleUpdateValue ($hash, "state", $state, 1); - - Log3 ($name, 2, "DbRep $name - Database command aborted: \"$cause\" "); - -return; -} - #################################################################################################### # Abbruchroutine Timeout DB-Dump #################################################################################################### sub DbRep_DumpAborted { my $hash = shift; my $cause = shift // "Timeout: process terminated"; - + my $name = $hash->{NAME}; my $dbh = $hash->{DBH}; @@ -10554,7 +10757,7 @@ return; sub DbRep_OptimizeAborted { my $hash = shift; my $cause = shift // "Timeout: process terminated"; - + my $name = $hash->{NAME}; my $dbh = $hash->{DBH}; @@ -10583,7 +10786,7 @@ return; sub DbRep_RepairAborted { my $hash = shift; my $cause = shift // "Timeout: process terminated"; - + my $name = $hash->{NAME}; my $dbh = $hash->{DBH}; my $dbloghash = $defs{$hash->{HELPER}{DBLOGDEVICE}}; @@ -10593,7 +10796,7 @@ sub DbRep_RepairAborted { # Datenbankverbindung in DbLog wieder 
öffenen my $dbl = $dbloghash->{NAME}; CommandSet(undef,"$dbl reopen"); - + ReadingsSingleUpdateValue ($hash, 'state', 'Abort', 0); my $erread = DbRep_afterproc ($hash, "repair"); # Befehl nach Procedure ausführen @@ -11216,15 +11419,15 @@ return ($devs,$devswc); } ###################################################################################### -# Erstelle Insert SQL-Schema für Tabelle mit/ohne primary key +# Erstelle Insert SQL-Schema für Tabelle mit/ohne primary key ###################################################################################### -sub DbRep_createInsertSQLscheme { +sub DbRep_createInsertSQLscheme { my $table = shift; my $dbmodel = shift; my $usepkh = shift; - + my $sql; - + if ($usepkh && $dbmodel eq 'MYSQL') { $sql = "INSERT IGNORE INTO $table (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"; } @@ -11242,11 +11445,11 @@ return $sql; } ###################################################################################### -# Erstelle Update SQL-Schema für Tabelle +# Erstelle Update SQL-Schema für Tabelle ###################################################################################### -sub DbRep_createUpdateSQLscheme { +sub DbRep_createUpdateSQLscheme { my $table = shift; - + my $sql = "UPDATE $table SET TIMESTAMP=?, DEVICE=?, READING=?, TYPE=?, EVENT=?, VALUE=?, UNIT=? WHERE TIMESTAMP=? AND DEVICE=? AND READING=?"; return $sql; @@ -11306,7 +11509,21 @@ sub DbRep_dbConnect { if($utf8) { if($dbmodel eq "MYSQL") { $dbh->{mysql_enable_utf8} = 1; - $dbh->do('set names "UTF8"'); + + ($err, my @se) = DbRep_prepareExec2Array ($name, $dbh, "SHOW VARIABLES LIKE 'collation_database'"); + return $err if ($err); + + my $dbcharset = @se ? 
$se[1] : 'noresult'; + + Log3 ($name, 4, "DbRep $name - Database Character set is >$dbcharset<"); + + if ($dbcharset !~ /noresult|ucs2|utf16|utf32/ixs) { # Impermissible Client Character Sets -> https://dev.mysql.com/doc/refman/8.0/en/charset-connection.html + my $collation = $dbcharset; + $dbcharset = (split '_', $collation, 2)[0]; + + ($err, undef) = DbRep_dbhDo ($name, $dbh, qq(set names "$dbcharset" collate "$collation")); + return $err if ($err); + } } if($dbmodel eq "SQLITE") { @@ -11370,7 +11587,7 @@ return ($err, $sth); } #################################################################################################### -# SQL Query evaluieren und Return-String (bei Error in Verarbeitung) und $sth-String +# SQL Query evaluieren und Return Error-String oder $sth-String # bei Erfolg #################################################################################################### sub DbRep_prepareExecuteQuery { @@ -11381,12 +11598,12 @@ sub DbRep_prepareExecuteQuery { my $err = q{}; - my ($sth,$result); + my ($sth,$res); Log3 ($name, 4, "DbRep $name - $info"); - eval{ $sth = $dbh->prepare($sql); - $result = $sth->execute(); + eval{ $sth = $dbh->prepare($sql); + $res = $sth->execute(); 1; } or do { $err = encode_base64($@,""); @@ -11395,7 +11612,39 @@ sub DbRep_prepareExecuteQuery { $dbh->disconnect; }; -return ($err, $sth, $result); +return ($err, $sth, $res); +} + +#################################################################################################### +# SQL Query evaluieren und Return Error-String oder Ergebnis Array +# bei Erfolg +#################################################################################################### +sub DbRep_prepareExec2Array { + my $name = shift; + my $dbh = shift; + my $sql = shift; + my $info = shift // "SQL execute: $sql"; + + my $err = q{}; + my @sr = (); + + my ($sth,$res); + + Log3 ($name, 4, "DbRep $name - $info"); + + eval{ $sth = $dbh->prepare($sql); + $res = $sth->execute(); + 1; + } + or do { 
$err = encode_base64($@,""); + Log3 ($name, 2, "DbRep $name - ERROR - $@"); + $sth->finish if($sth); + $dbh->disconnect; + }; + + @sr = $sth->fetchrow_array; + +return ($err, @sr); } #################################################################################################### @@ -11429,7 +11678,7 @@ return ($err, $rv); #################################################################################################### sub DbRep_execInsertPrepared { my $paref = shift; - + my $name = $paref->{name}; my $sth = $paref->{sth}; my $timestamp = $paref->{timestamp}; @@ -11447,7 +11696,7 @@ sub DbRep_execInsertPrepared { or do { $err = encode_base64($@,""); Log3 ($name, 2, "DbRep $name - ERROR - $@"); }; - + $rv = 0 if($rv eq "0E0"); return ($err, $rv); @@ -11459,7 +11708,7 @@ return ($err, $rv); #################################################################################################### sub DbRep_execUpdatePrepared { my $paref = shift; - + my $name = $paref->{name}; my $sth = $paref->{sth}; my $timestamp = $paref->{timestamp}; @@ -11477,7 +11726,7 @@ sub DbRep_execUpdatePrepared { or do { $err = encode_base64($@,""); Log3 ($name, 2, "DbRep $name - ERROR - $@"); }; - + $rv = 0 if($rv eq "0E0"); return ($err, $rv); @@ -11668,11 +11917,11 @@ sub ReadingsSingleUpdateValue { my $name = $hash->{NAME}; readingsSingleUpdate($hash, $reading, $val, $ev); - + readingsBeginUpdate ($hash); DbRep_userexit ($name, $reading, $val); readingsEndUpdate ($hash, 1); - + DbRep_autoForward ($name, $reading, $val); return; @@ -11686,25 +11935,25 @@ sub ReadingsSingleUpdateTime { my $bpt = shift; my $spt = shift; my $evt = shift; - + my $name = $hash->{NAME}; if (AttrVal($name, "showproctime", 0)) { if (defined $bpt) { $bpt = sprintf "%.4f", $bpt; - + readingsSingleUpdate ($hash, "background_processing_time", $bpt, $evt); - + readingsBeginUpdate ($hash); DbRep_userexit ($name, "background_processing_time", $bpt); readingsEndUpdate ($hash, 1); } - + if (defined $spt) { $spt = sprintf 
"%.4f", $spt; - + readingsSingleUpdate ($hash, "sql_processing_time", $spt, $evt); - + readingsBeginUpdate ($hash); DbRep_userexit ($name, "sql_processing_time", $spt); readingsEndUpdate ($hash, 1); @@ -11740,7 +11989,7 @@ sub ReadingsBulkUpdateTimeState { my $brt = shift; my $rt = shift; my $sval = shift; - + my $name = $hash->{NAME}; if(AttrVal($name, 'showproctime', 0)) { @@ -11749,7 +11998,7 @@ sub ReadingsBulkUpdateTimeState { readingsBulkUpdate ($hash, "background_processing_time", $brt); DbRep_userexit ($name, "background_processing_time", $brt); } - + if (defined $rt) { $rt = sprintf "%.4f", $rt; readingsBulkUpdate ($hash, "sql_processing_time", $rt); @@ -11765,14 +12014,14 @@ return; } #################################################################################################### -# Readingsbulkupdate für processing_time, +# Readingsbulkupdate für processing_time, # readingsBeginUpdate und readingsEndUpdate muss vor/nach Funktionsaufruf gesetzt werden #################################################################################################### sub ReadingsBulkUpdateTime { my $hash = shift; my $bpt = shift; my $spt = shift; - + my $name = $hash->{NAME}; if(AttrVal($name, 'showproctime', 0)) { @@ -11781,7 +12030,7 @@ sub ReadingsBulkUpdateTime { readingsBulkUpdate ($hash, "background_processing_time", $bpt); DbRep_userexit ($name, "background_processing_time", $bpt); } - + if (defined $spt) { $spt = sprintf "%.4f", $spt; readingsBulkUpdate ($hash, "sql_processing_time", $spt); @@ -12149,7 +12398,7 @@ sub DbRep_beforeproc { if ($err) { Log3 ($name, 2, "DbRep $name - command message before $txt: \"$err\" "); my $erread = "Warning - message from command before $txt appeared"; - + ReadingsSingleUpdateValue ($hash, "before".$txt."_message", $err, 1); ReadingsSingleUpdateValue ($hash, "state", $erread, 1); } @@ -12180,16 +12429,16 @@ sub DbRep_afterproc { if ($err) { Log3 ($name, 2, qq{DbRep $name - command message after $cmd: "$err"}); - + $erread = 
$sval eq 'error' ? $sval : qq(WARNING - $cmd finished, but message after command appeared); - + ReadingsSingleUpdateValue ($hash, 'after_'.$cmd.'_message', $err, 1); ReadingsSingleUpdateValue ($hash, 'state', $erread, 1); - + return $erread; } } - + return '' if($sval && $sval !~ /running/xs); my $rtxt = $cmd eq "dump" ? "Database backup finished" : @@ -13096,13 +13345,13 @@ sub DbRep_OutputWriteToDB { my $reading = shift; my $wrstr = shift; my $optxt = shift; # Operation Kürzel - + my $hash = $defs{$name}; my $dbloghash = $defs{$hash->{HELPER}{DBLOGDEVICE}}; my $dblogname = $dbloghash->{NAME}; my $DbLogType = AttrVal ($dblogname, 'DbLogType', 'History'); my $supk = AttrVal ($dblogname, 'noSupportPK', 0); - + $device =~ s/[^A-Za-z\/\d_\.-]/\//g; $reading =~ s/[^A-Za-z\/\d_\.-]/\//g; my $type = 'calculated'; @@ -13117,11 +13366,11 @@ sub DbRep_OutputWriteToDB { if(!$dbloghash->{HELPER}{COLSET}) { $err = qq(No result of "$hash->{LASTCMD}" to database written. Cause: column width in "$hash->{DEF}" isn't set); - + Log3 ($name, 2, "DbRep $name - ERROR - $err"); - + $err = encode_base64($err,""); - + return ($err,$wrt,$irowdone); } @@ -13150,8 +13399,8 @@ sub DbRep_OutputWriteToDB { if($aggr =~ /no|day|week|month|year/) { $time = "00:00:01" if($time !~ /^(\d{2}):(\d{2}):(\d{2})$/ || $hash->{LASTCMD} =~ /\bwriteToDB(Single(Start)?)*?\b/); # https://forum.fhem.de/index.php/topic,105787.msg1013920.html#msg1013920 - $ntime = "23:59:59" if($ntime !~ /^(\d{2}):(\d{2}):(\d{2})$/ || $hash->{LASTCMD} =~ /\bwriteToDB(Single(Start)?)*?\b/); - + $ntime = "23:59:59" if($ntime !~ /^(\d{2}):(\d{2}):(\d{2})$/ || $hash->{LASTCMD} =~ /\bwriteToDB(Single(Start)?)*?\b/); + ($year,$mon,$mday) = split "-", $ndate; $corr = $i != $ele ? 
86400 : 0; $t1 = fhemTimeLocal(59, 59, 23, $mday, $mon-1, $year-1900)-$corr; @@ -13167,7 +13416,7 @@ sub DbRep_OutputWriteToDB { if($aggr eq "hour") { $time = "$hour:00:01" if($time !~ /^(\d{2}):(\d{2}):(\d{2})$/ || $hash->{LASTCMD} =~ /\bwriteToDB(Single(Start)?)*?\b/); # https://forum.fhem.de/index.php/topic,105787.msg1013920.html#msg1013920 - $ntime = "$hour:59:59" if($ntime !~ /^(\d{2}):(\d{2}):(\d{2})$/ || $hash->{LASTCMD} =~ /\bwriteToDB(Single(Start)?)*?\b/); + $ntime = "$hour:59:59" if($ntime !~ /^(\d{2}):(\d{2}):(\d{2})$/ || $hash->{LASTCMD} =~ /\bwriteToDB(Single(Start)?)*?\b/); } } @@ -13226,14 +13475,14 @@ sub DbRep_OutputWriteToDB { } } } - + return ($err,$wrt,$irowdone) if(!@wr_arr); - + #Log3 ($name, 2, "DbRep $name - data for write: \n". Dumper @wr_arr); #return; - # Schreibzyklus - ################## + # Schreibzyklus + ################## ($err, $dbh, my $dbmodel) = DbRep_dbConnect ($name, 0); return ($err,$wrt,$irowdone) if ($err); @@ -13246,22 +13495,22 @@ sub DbRep_OutputWriteToDB { else { Log3 ($name, 5, "DbRep $name -> Primary Key usage suppressed by attribute noSupportPK in DbLog >$dblogname<"); } - + my $sql = DbRep_createInsertSQLscheme ('history', $dbmodel, $usepkh); ($err, my $sth_ih) = DbRep_prepareOnly ($name, $dbh, $sql); return ($err,$wrt,$irowdone) if ($err); - + $sql = DbRep_createUpdateSQLscheme ('history'); ($err, my $sth_uh) = DbRep_prepareOnly ($name, $dbh, $sql); return ($err,$wrt,$irowdone) if ($err); - + $sql = DbRep_createInsertSQLscheme ('current', $dbmodel, $usepkc); ($err, my $sth_ic) = DbRep_prepareOnly ($name, $dbh, $sql); return ($err,$wrt,$irowdone) if ($err); - + $sql = DbRep_createUpdateSQLscheme ('current'); ($err, my $sth_uc) = DbRep_prepareOnly ($name, $dbh, $sql); - return ($err,$wrt,$irowdone) if ($err); + return ($err,$wrt,$irowdone) if ($err); $err = DbRep_beginDatabaseTransaction ($name, $dbh); return ($err,$wrt,$irowdone) if ($err); @@ -13273,7 +13522,7 @@ sub DbRep_OutputWriteToDB { for my $row (@wr_arr) { 
my @a = split "\\|", $row; - + $timestamp = $a[0]; $device = $a[1]; $type = $a[2]; @@ -13282,9 +13531,9 @@ sub DbRep_OutputWriteToDB { $value = $a[5]; $unit = $a[6]; - if (lc($DbLogType) =~ m(history) ) { - ($err, my $rv_uh) = DbRep_execUpdatePrepared ( { name => $name, - sth => $sth_uh, + if (lc($DbLogType) =~ m(history) ) { + ($err, my $rv_uh) = DbRep_execUpdatePrepared ( { name => $name, + sth => $sth_uh, timestamp => $timestamp, device => $device, type => $type, @@ -13292,20 +13541,20 @@ sub DbRep_OutputWriteToDB { reading => $reading, value => $value, unit => $unit - } + } ); if ($err) { $dbh->disconnect; - return ($err,$wrt,$irowdone); + return ($err,$wrt,$irowdone); } - + $uhs += $rv_uh if($rv_uh); Log3 ($name, 4, "DbRep $name - UPDATE history: $row, RESULT: $rv_uh"); if ($rv_uh == 0) { - ($err, my $rv_ih) = DbRep_execInsertPrepared ( { name => $name, - sth => $sth_ih, + ($err, my $rv_ih) = DbRep_execInsertPrepared ( { name => $name, + sth => $sth_ih, timestamp => $timestamp, device => $device, type => $type, @@ -13313,22 +13562,22 @@ sub DbRep_OutputWriteToDB { reading => $reading, value => $value, unit => $unit - } + } ); if ($err) { $dbh->disconnect; - return ($err,$wrt,$irowdone); + return ($err,$wrt,$irowdone); } - + $ihs += $rv_ih if($rv_ih); - + Log3 ($name, 4, "DbRep $name - INSERT history: $row, RESULT: $rv_ih"); } } - - if (lc($DbLogType) =~ m(current) ) { - ($err, my $rv_uc) = DbRep_execUpdatePrepared ( { name => $name, - sth => $sth_uc, + + if (lc($DbLogType) =~ m(current) ) { + ($err, my $rv_uc) = DbRep_execUpdatePrepared ( { name => $name, + sth => $sth_uc, timestamp => $timestamp, device => $device, type => $type, @@ -13336,16 +13585,16 @@ sub DbRep_OutputWriteToDB { reading => $reading, value => $value, unit => $unit - } + } ); if ($err) { $dbh->disconnect; - return ($err,$wrt,$irowdone); + return ($err,$wrt,$irowdone); } - + if ($rv_uc == 0) { - ($err, undef) = DbRep_execInsertPrepared ( { name => $name, - sth => $sth_ic, + ($err, undef) = 
DbRep_execInsertPrepared ( { name => $name, + sth => $sth_ic, timestamp => $timestamp, device => $device, type => $type, @@ -13353,7 +13602,7 @@ sub DbRep_OutputWriteToDB { reading => $reading, value => $value, unit => $unit - } + } ); } } @@ -13366,7 +13615,7 @@ sub DbRep_OutputWriteToDB { Log3 ($name, 3, "DbRep $name - number of lines updated in >$dblogname<: $uhs"); Log3 ($name, 3, "DbRep $name - number of lines inserted into >$dblogname<: $ihs"); - + $irowdone = $ihs + $uhs; $wrt = tv_interval($wst); # SQL-Laufzeit ermitteln @@ -13695,6 +13944,23 @@ sub DbRep_numval { return $val; } +################################################################ +# Zerlegung des Attributwertes "diffAccept" +################################################################ +sub DbRep_ExplodeDiffAcc { + my $val = shift // q{empty}; + + my $sign = q{}; + my $daval = q{}; + + if ($val =~/^(\+?-?)([0-9]+)$/xs) { + $sign = $1; + $daval = $2; + } + +return ($sign, $daval); +} + ################################################################ # Prüfung auf numerischen Wert (vorzeichenbehaftet) ################################################################ @@ -13827,12 +14093,12 @@ sub DbRep_setVersionInfo { if($modules{$type}{META}{x_prereqs_src} && !$hash->{HELPER}{MODMETAABSENT}) { # META-Daten sind vorhanden $modules{$type}{META}{version} = "v".$v; # Version aus META.json überschreiben, Anzeige mit {Dumper $modules{SMAPortal}{META}} - if($modules{$type}{META}{x_version}) { # {x_version} ( nur gesetzt wenn $Id: 93_DbRep.pm 27184 2023-02-05 19:47:19Z DS_Starter $ im Kopf komplett! vorhanden ) + if($modules{$type}{META}{x_version}) { # {x_version} ( nur gesetzt wenn $Id: 93_DbRep.pm 27340 2023-03-19 07:45:02Z DS_Starter $ im Kopf komplett! 
vorhanden ) $modules{$type}{META}{x_version} =~ s/1.1.1/$v/g; } else { $modules{$type}{META}{x_version} = $v; } - return $@ unless (FHEM::Meta::SetInternals($hash)); # FVERSION wird gesetzt ( nur gesetzt wenn $Id: 93_DbRep.pm 27184 2023-02-05 19:47:19Z DS_Starter $ im Kopf komplett! vorhanden ) + return $@ unless (FHEM::Meta::SetInternals($hash)); # FVERSION wird gesetzt ( nur gesetzt wenn $Id: 93_DbRep.pm 27340 2023-03-19 07:45:02Z DS_Starter $ im Kopf komplett! vorhanden ) if(__PACKAGE__ eq "FHEM::$type" || __PACKAGE__ eq $type) { # es wird mit Packages gearbeitet -> Perl übliche Modulversion setzen # mit {->VERSION()} im FHEMWEB kann Modulversion abgefragt werden @@ -13953,9 +14219,9 @@ sub DbRep_modAssociatedWith { my $hash = shift; my $cmd = shift; my $awdev = shift; - + my $name = $hash->{NAME}; - + my (@naw,@edvs,@edvspcs,$edevswc); my ($edevs,$idevice,$edevice) = ('','',''); @@ -14198,6 +14464,10 @@ return; Currently following set-commands are included. They are used to trigger the evaluations and define the evaluation option option itself. The criteria of searching database content and determine aggregation is carried out by setting several attributes.

+ + Note:
+ If you are in detail view it could be necessary to refresh the browser to see the result of operation as soon in DeviceOverview section "state = done" will be shown. +


-
  • maxValue [display | writeToDB | deleteOther] - - calculates the maximum value of database column "VALUE" between period given by attributes - timestamp_begin, "timestamp_end" / "timeDiffToNow / timeOlderThan" and so on. - The reading to evaluate must be defined using attribute reading. - The evaluation contains the timestamp of the last appearing of the identified maximum value - within the given period.

    +
  • maxValue [display | writeToDB | deleteOther]
    br> - If no option or the option display is specified, the results are only displayed. Using - option writeToDB the calculated results are stored in the database with a new reading - name.
    + Calculates the maximum value of database column "VALUE" between period given by attributes + timestamp_begin, "timestamp_end" / "timeDiffToNow / timeOlderThan" and so on. + The reading to evaluate must be defined using attribute reading. + The evaluation contains the timestamp of the last appearing of the identified maximum value + within the given period.

    - The new readingname is built of a prefix and the original reading name, - in which the original reading name can be replaced by the value of attribute readingNameMap. - The prefix is made up of the creation function and the aggregation.
    - The timestamp of the new stored readings is deviated from aggregation period, - unless no unique point of time of the result can be determined. - The field "EVENT" will be filled with "calculated".

    + If no option or the option display is specified, the results are only displayed. Using + option writeToDB the calculated results are stored in the database with a new reading + name.
    - With option deleteOther all datasets except the dataset with the maximum value are deleted.

    + The new readingname is built of a prefix and the original reading name, + in which the original reading name can be replaced by the value of attribute readingNameMap. + The prefix is made up of the creation function and the aggregation.
    + The timestamp of the new stored readings is deviated from aggregation period, + unless no unique point of time of the result can be determined. + The field "EVENT" will be filled with "calculated".

    - -
    + With option deleteOther all datasets except the dataset with the maximum value are deleted.

    - Summarized the relevant attributes to control this function are:

    + +
    -
  • - - - - - - - - - -
    aggregation : choose the aggregation period
    device : include or exclude <device> from selection
    executeBeforeProc : execution of FHEM command (or Perl-routine) before operation
    executeAfterProc : execution of FHEM command (or Perl-routine) after operation
    reading : include or exclude <reading> from selection
    readingNameMap : rename the resulted readings
    time.* : a number of attributes to limit selection by time
    valueFilter : an additional REGEXP to control the record selection. The REGEXP is applied to the database field 'VALUE'.
    - + Summarized the relevant attributes to control this function are:
    -
    + +
    -
  • minValue [display | writeToDB | deleteOther] - - calculates the minimum value of database column "VALUE" between period given by attributes - timestamp_begin, "timestamp_end" / "timeDiffToNow / timeOlderThan" and so on. - The reading to evaluate must be defined using attribute reading. - The evaluation contains the timestamp of the first appearing of the identified minimum - value within the given period.

    +
  • +
    - If no option or the option display is specified, the results are only displayed. Using - option writeToDB the calculated results are stored in the database with a new reading - name.
    + +
  • migrateCollation <Collation>

    - The new readingname is built of a prefix and the original reading name, - in which the original reading name can be replaced by the value of attribute readingNameMap. - The prefix is made up of the creation function and the aggregation.
    - The timestamp of the new stored readings is deviated from aggregation period, - unless no unique point of time of the result can be determined. - The field "EVENT" will be filled with "calculated".

    + Migrates the used character set/collation of the database and the tables current and history to the + specified format. +

    - With option deleteOther all datasets except the dataset with the maximum value are deleted.

    + Relevant attributes are:
    - -
    + +
    - Summarized the relevant attributes to control this function are:

    +
  • +
    - +
  • minValue [display | writeToDB | deleteOther]

    -

  • + Calculates the minimum value of database column "VALUE" between period given by attributes + timestamp_begin, "timestamp_end" / "timeDiffToNow / timeOlderThan" and so on. + The reading to evaluate must be defined using attribute reading. + The evaluation contains the timestamp of the first appearing of the identified minimum + value within the given period.

    -
  • optimizeTables [showInfo | execute] - - optimize tables in the connected database (MySQL).

    + If no option or the option display is specified, the results are only displayed. Using + option writeToDB the calculated results are stored in the database with a new reading + name.
    - -
    + The new readingname is built of a prefix and the original reading name, + in which the original reading name can be replaced by the value of attribute readingNameMap. + The prefix is made up of the creation function and the aggregation.
    + The timestamp of the new stored readings is deviated from aggregation period, + unless no unique point of time of the result can be determined. + The field "EVENT" will be filled with "calculated".

    - Before and after an optimization it is possible to execute a FHEM command. - (please see attributes executeBeforeProc, executeAfterProc) -

    + With option deleteOther all datasets except the dataset with the maximum value are deleted.

    - + +
    -
  • readingRename <[device:]oldreadingname>,<newreadingname>
    - Renames the reading name of a device inside the connected database (see Internal DATABASE). - The readingname will allways be changed in the entire database. - Possibly set time limits or restrictions by attributes - device and/or reading will not be considered.
    - As an option a device can be specified. In this case only the old readings of this device - will be renamed.

    + Relevant attributes are:
    - -
    + +
    - The amount of renamed reading names (datasets) will be displayed in reading "reading_renamed".
    - If the reading name to be renamed was not found in the database, a WARNING will appear in reading "reading_not_renamed".
    - Appropriate entries will be written to Logfile if verbose >= 3 is set. -

    +
  • +
    - Note:
    - Even though the function itself is designed non-blocking, make sure the assigned DbLog-device - is operating in asynchronous mode to avoid FHEMWEB from blocking.

    + +
  • optimizeTables [showInfo | execute]

    - The relevant attributes to control this function are:

    + Optimize tables in the connected database (MySQL).

    - -
    -
    + +
    -
  • + Relevant attributes are:
    -
  • reduceLog [<no>[:<nn>]] [mode] [EXCLUDE=device1:reading1,device2:reading2,...] [INCLUDE=device:reading]
    - Reduces historical data sets.

    + +
    +
  • - Operation without specifying command line operators

    + +
  • readingRename <[device:]oldreadingname>,<newreadingname>

    - The data is cleaned within the time limits defined by the time.*-attributes. - At least one of the time.* attributes must be set (see table below). - The respective missing time accrual is determined by the module in this case.
    - The working mode is determined by the optional specification of mode: -

    + Renames the reading name of a device inside the connected database (see Internal DATABASE). + The readingname will allways be changed in the entire database. + Possibly set time limits or restrictions by attributes + device and/or reading will not be considered.
    + As an option a device can be specified. In this case only the old readings of this device + will be renamed.

    - -
    + +
    + + The amount of renamed reading names (datasets) will be displayed in reading "reading_renamed".
    + If the reading name to be renamed was not found in the database, a WARNING will appear in reading "reading_not_renamed".
    + Appropriate entries will be written to Logfile if verbose >= 3 is set. +

    + + Relevant attributes are:
    + + +
    + +
    +
  • + +
  • reduceLog [<no>[:<nn>]] [mode] [EXCLUDE=device1:reading1,device2:reading2,...] [INCLUDE=device:reading]

    + + Reduces historical data sets.

    + + Operation without specifying command line operators

    + + The data is cleaned within the time limits defined by the time.*-attributes. + At least one of the time.* attributes must be set (see table below). + The respective missing time accrual is determined by the module in this case.
    + The working mode is determined by the optional specification of mode: +

    + + +
    - With the attributes device and reading the data records to be considered can be included - or be excluded. Both restrictions reduce the selected data and reduce the - resource requirements. - The read "reduceLogState" contains the execution result of the last reduceLog command.

    + With the attributes device and reading the data records to be considered can be included + or be excluded. Both restrictions reduce the selected data and reduce the + resource requirements. + The read "reduceLogState" contains the execution result of the last reduceLog command.

    - Taking the above into account, the following attributes are relevant for this function:

    + Relevant attributes are:
    - -
    + +
    - Examples:

    - +
    - Operation with specification of command line operators

    + Operation with specification of command line operators

    - Es werden Datensätze berücksichtigt die älter sind als <no> Tage und (optional) neuer sind als - <nn> Tage. - Records are considered that are older than <no> days and (optionally) newer than - <nn> days. - The working mode is determined by the optional specification of mode as described above. -

    + Es werden Datensätze berücksichtigt die älter sind als <no> Tage und (optional) neuer sind als + <nn> Tage. + Records are considered that are older than <no> days and (optionally) newer than + <nn> days. + The working mode is determined by the optional specification of mode as described above. +

    - The additions "EXCLUDE" or "INCLUDE" can be added to exclude or include device/reading combinations in reduceLog - and override the "device" and "reading" attributes, which are ignored in this case.
    - The specification in "EXCLUDE" is evaluated as a regex. Inside "INCLUDE", SQL wildcards - can be used. (for more information on SQL wildcards, see with get <name> versionNotes 6) -

    + The additions "EXCLUDE" or "INCLUDE" can be added to exclude or include device/reading combinations in reduceLog + and override the "device" and "reading" attributes, which are ignored in this case.
    + The specification in "EXCLUDE" is evaluated as a regex. Inside "INCLUDE", SQL wildcards + can be used. (for more information on SQL wildcards, see with get <name> versionNotes 6) +

    - Examples:

    - -
    + Examples:

    + +
    - Note:
    - Although the function itself is designed non-blocking, the assigned DbLog device should be operated in - asynchronous mode to avoid blocking FHEMWEB (table lock).
    - Furthermore it is strongly recommended to create the standard INDEX 'Search_Idx' in the table - 'history' !
    - The processing of this command may take an extremely long time (without INDEX).

    -

  • + Note:
    + Although the function itself is designed non-blocking, the assigned DbLog device should be operated in + asynchronous mode to avoid blocking FHEMWEB (table lock).
    + Furthermore it is strongly recommended to create the standard INDEX 'Search_Idx' in the table + 'history' !
    + The processing of this command may take an extremely long time (without INDEX).

    + + +
    -
  • repairSQLite [sec] - repairs a corrupted SQLite database.
    - A corruption is usally existent when the error message "database disk image is malformed" - appears in reading "state" of the connected DbLog-device. - If the command was started, the connected DbLog-device will firstly disconnected from the - database for 10 hours (36000 seconds) automatically (breakup time). After the repair is - finished, the DbLog-device will be connected to the (repaired) database immediately.
    - As an argument the command can be completed by a differing breakup time (in seconds).
    - The corrupted database is saved as <database>.corrupt in same directory.

    + +
  • repairSQLite [sec]

    + + Repairs a corrupted SQLite database.

    + + A corruption is usally existent when the error message "database disk image is malformed" + appears in reading "state" of the connected DbLog-device. + If the command was started, the connected DbLog-device will firstly disconnected from the + database for 10 hours (36000 seconds) automatically (breakup time). After the repair is + finished, the DbLog-device will be connected to the (repaired) database immediately.
    + As an argument the command can be completed by a differing breakup time (in seconds).
    + The corrupted database is saved as <database>.corrupt in same directory. +

    + + Relevant attributes are:
    - + Example:
    + -
  • restoreMySQL <File> - restore a database from serverSide- or clientSide-Dump.
    - The function provides a drop-down-list of files which can be used for restore.

    + Note:
    + It can't be guaranteed, that the repair attempt proceed successfully and no data loss will result. + Depending from corruption severity data loss may occur or the repair will fail even though + no error appears during the repair process. Please make sure a valid backup took place ! +

    + +
  • +
    + + +
  • restoreMySQL <File> - restore a database from serverSide- or clientSide-Dump.

    + + The function provides a drop-down-list of files which can be used for restore.

    - Usage of serverSide-Dumps
    - The content of history-table will be restored from a serverSide-Dump. - Therefore the remote directory "dumpDirRemote" of the MySQL-Server has to be mounted on the - Client and make it usable to the DbRep device by setting attribute dumpDirLocal - to the appropriate value.
    - All files with extension "csv[.gzip]" and if the filename is beginning with the name of the connected database - (see Internal DATABASE) are listed. -

    + Usage of serverSide-Dumps
    + The content of history-table will be restored from a serverSide-Dump. + Therefore the remote directory "dumpDirRemote" of the MySQL-Server has to be mounted on the + Client and make it usable to the DbRep device by setting attribute dumpDirLocal + to the appropriate value.
    + All files with extension "csv[.gzip]" and if the filename is beginning with the name of the connected database + (see Internal DATABASE) are listed. +

    - Usage of clientSide-Dumps
    - The used database user needs the FILE privilege (see Wiki).
    - All tables and views (if present) are restored. - The directory which contains the dump files has to be set by attribute dumpDirLocal - to make it usable by the DbRep device.
    - All files with extension "sql[.gzip]" and if the filename is beginning with the name of the connected database - (see Internal DATABASE) are listed.
    - The restore speed depends of the server variable "max_allowed_packet". You can change - this variable in file my.cnf to adapt the speed. Please consider the need of sufficient ressources - (especially RAM). -

    + Usage of clientSide-Dumps
    + The used database user needs the FILE privilege (see Wiki).
    + All tables and views (if present) are restored. + The directory which contains the dump files has to be set by attribute dumpDirLocal + to make it usable by the DbRep device.
    + All files with extension "sql[.gzip]" and if the filename is beginning with the name of the connected database + (see Internal DATABASE) are listed.
    + The restore speed depends of the server variable "max_allowed_packet". You can change + this variable in file my.cnf to adapt the speed. Please consider the need of sufficient ressources + (especially RAM). +

    - The database user needs rights for database management, e.g.:
    - CREATE, ALTER, INDEX, DROP, SHOW VIEW, CREATE VIEW -

    -

  • + The database user needs rights for database management, e.g.:
    + CREATE, ALTER, INDEX, DROP, SHOW VIEW, CREATE VIEW +

    + + Relevant attributes are:
    -
  • restoreSQLite <File>.sqlitebkp[.gzip] - restores a backup of SQLite database.
    - The function provides a drop-down-list of files which can be used for restore. - The data stored in the current database are deleted respectively overwritten. - All files with extension "sqlitebkp[.gzip]" and if the filename is beginning with the name of the connected database - will are listed.

    -

  • + +
    + + +
    + + +
  • restoreSQLite <File>.sqlitebkp[.gzip]

    + + Restores a backup of SQLite database.
    + The function provides a drop-down-list of files which can be used for restore. + The data stored in the current database are deleted respectively overwritten. + All files with extension "sqlitebkp[.gzip]" and if the filename is beginning with the name of the connected database + will are listed. +

    + + Relevant attributes are:
    + + +
    + +
  • +
  • sqlCmd

    @@ -15480,24 +15816,21 @@ return;
    - The attributes relevant for controlling sqlCmd are:

    + Relevant attributes are:

    -
    Note:
    Even though the module works non-blocking regarding to database operations, a huge @@ -15508,225 +15841,247 @@ return;

  • -
  • sqlCmdHistory - If activated with the attribute sqlCmdHistoryLength, - a stored SQL statement can be selected from a list and executed. - The SQL cache is automatically saved when FHEM is closed and restored when the system is started. - The following entries execute special functions: -

    - - -
    - - The attributes relevant to controlling this function are:

    - - -
    -
    -

  • - -
  • sqlSpecial - This function provides a drop-down list with a selection of prepared reportings.
    - The statements result is depicted in reading "SqlResult". - The result can be formatted by attribute sqlResultFormat - a well as the used field separator by attribute sqlResultFieldSep. -

    - - The following predefined reportings are selectable:

    - -
    - - The relevant attributes for this function are:

    - - -


  • - -
  • sumValue [display | writeToDB | writeToDBSingle | writeToDBInTime] - - Calculates the total values of the database field "VALUE" in the time limits - of the possible time.*-attributes.

    - - The reading to be evaluated must be specified in the attribute reading. - This function is useful if continuous value differences of a reading are written - into the database.

    - - If none or the option display is specified, the results are only displayed. With - the options writeToDB, writeToDBSingle or writeToDBInTime the calculation results are written - with a new reading name into the database.

    - - -
    - - The new reading name is formed from a prefix and the original reading name, - where the original reading name can be replaced by the attribute "readingNameMap". - The prefix consists of the educational function and the aggregation.
    - The timestamp of the new reading in the database is determined by the set aggregation period - if no clear time of the result can be determined. - The field "EVENT" is filled with "calculated".

    - - -
    - - Summarized the relevant attributes to control this function are:

    - - - -

  • - -
  • syncStandby <DbLog-Device Standby> - - datasets of the connected database (source) are transmitted into another database - (Standby-database).
    - Here the "<DbLog-Device Standby>" is the DbLog-Device what is connected to the - Standby-database.

    - All the datasets which are determined by timestamp_begin attribute - or respectively the attributes "device", "reading" are transmitted.
    - The datasets are transmitted in time slices accordingly to the adjusted aggregation. - If the attribute "aggregation" has value "no" or "month", the datasets are transmitted - automatically in daily time slices into standby-database. - Source- and Standby-database can be of different types. -

    - - The relevant attributes to control the syncStandby function are:

    - - - -

  • - - -
  • tableCurrentFillup - the current-table will be filled u with an extract of the history-table. - The attributes for limiting time and device, reading are considered. - Thereby the content of the extract can be affected. In the associated DbLog-device the attribute "DbLogType" should be set to - "SampleFill/History". -
    -
    - - The following attributes are relevant for this function:

    - - -
    -
    - -
  • - - -
  • tableCurrentPurge - deletes the content of current-table. There are no limits, e.g. by attributes "timestamp_begin", "timestamp_end", device, reading - and so on, considered. -
    -
    - - The following attributes are relevant for this function:

    - - -
    -
    - -
  • - - -
  • vacuum

    + +
  • sqlCmdHistory

    - Optimizes the tables in the connected database (SQLite, PostgreSQL).
    - Especially for SQLite databases it is strongly recommended to temporarily close the connection of the relevant DbLog - device to the database (see DbLog reopen command). + If activated with the attribute sqlCmdHistoryLength, + a stored SQL statement can be selected from a list and executed.
    + The SQL cache is automatically saved when FHEM is closed and restored when the system is started.
    + The following entries execute special functions:
    +

    + + +
    + + Relevant attributes are:
    + + +
    + +
  • +
    + + +
  • sqlSpecial

    + + This function provides a drop-down list with a selection of prepared reportings.
    + The statements result is depicted in reading "SqlResult". + The result can be formatted by attribute sqlResultFormat + a well as the used field separator by attribute sqlResultFieldSep. +

    + + +
    + + Relevant attributes are:
    + +

    - The following attributes are relevant for this function:

    +
  • +
    + + +
  • sumValue [display | writeToDB | writeToDBSingle | writeToDBInTime]

    + + Calculates the total values of the database field "VALUE" in the time limits + of the possible time.*-attributes.

    + + The reading to be evaluated must be specified in the attribute reading. + This function is useful if continuous value differences of a reading are written + into the database.

    + + If none or the option display is specified, the results are only displayed. With + the options writeToDB, writeToDBSingle or writeToDBInTime the calculation results are written + with a new reading name into the database.


    + The new reading name is formed from a prefix and the original reading name, + where the original reading name can be replaced by the attribute "readingNameMap". + The prefix consists of the educational function and the aggregation.
    + The timestamp of the new reading in the database is determined by the set aggregation period + if no clear time of the result can be determined. + The field "EVENT" is filled with "calculated".

    + + +
    + + Relevant attributes are:
    + + +
    +
    + +
  • +
    + +
  • syncStandby <DbLog-Device Standby>

    + + Datasets of the connected database (source) are transmitted into another database + (Standby-database).
    + Here the "<DbLog-Device Standby>" is the DbLog-Device what is connected to the + Standby-database.

    + All the datasets which are determined by timestamp_begin attribute + or respectively the attributes "device", "reading" are transmitted.
    + The datasets are transmitted in time slices accordingly to the adjusted aggregation. + If the attribute "aggregation" has value "no" or "month", the datasets are transmitted + automatically in daily time slices into standby-database. + Source- and Standby-database can be of different types. +

    + + Relevant attributes are:
    + + +
    +
    + +
  • +
    + + +
  • tableCurrentFillup

    + + The current-table will be filled u with an extract of the history-table.
    + The attributes for limiting time and device, reading are considered.
    + Thereby the content of the extract can be affected.
    + In the associated DbLog-device the attribute "DbLogType" should be set to "SampleFill/History". +
    +
    + + Relevant attributes are:
    + + +
    +
    + +
  • + + +
  • tableCurrentPurge

    + + Deletes the content of current-table.
    + There are no limits, e.g. by attributes timestamp_begin, timestamp_end, device or reading + considered. +
    +
    + + Relevant attributes are:
    + + +
    +
    + +
  • + + +
  • vacuum

    + + Optimizes the tables in the connected database (SQLite, PostgreSQL).
    + Especially for SQLite databases it is strongly recommended to temporarily close the connection of the relevant DbLog + device to the database (see DbLog reopen command). +
    +
    + + Relevant attributes are:
    + + +
    + Note:
    When the vacuum command is executed, the PRAGMA auto_vacuum = FULL is automatically applied to SQLite databases.
    - The vacuum command requires additional temporary memory. If there is not enough space in the default TMPDIR directory, + The vacuum command requires additional temporary memory. If there is not enough space in the default TMPDIR directory, SQLite can be assigned a sufficiently large directory by setting the environment variable SQLITE_TMPDIR.
    (see also: www.sqlite.org/tempfiles)
  • @@ -15737,20 +16092,6 @@ return; - For all evaluation variants (except sqlCmd,deviceRename,readingRename) applies:
    - In addition to the needed reading the device can be complemented to restrict the datasets for reporting / function. - If no time limit attribute is set but aggregation is set, the period from the oldest dataset in database to the current - date/time will be used as selection criterion. If the oldest dataset wasn't identified, then '1970-01-01 01:00:00' is used - as start date (see get <name> "minTimestamp" also). - If both time limit attribute and aggregation isn't set, the selection on database is runnung without timestamp criterion. -

    - - Note:
    - - If you are in detail view it could be necessary to refresh the browser to see the result of operation as soon in DeviceOverview section "state = done" will be shown. - -

    - @@ -16382,7 +16723,7 @@ sub bdump {
  • seqDoubletsVariance <positive variance [negative variance] [EDGE=negative|positive]>

    - + Accepted variance for the command "set <name> delSeqDoublets".
    The value of this attribute describes the variance up to consecutive numeric values (VALUE) of datasets are handled as identical. If only one numeric value is declared, it is used as @@ -17098,6 +17439,11 @@ return; Zur Zeit gibt es folgende Set-Kommandos. Über sie werden die Auswertungen angestoßen und definieren selbst die Auswertungsvariante. Nach welchen Kriterien die Datenbankinhalte durchsucht werden und die Aggregation erfolgt, wird durch Attribute gesteuert.

    + + Hinweis:
    + + In der Detailansicht kann ein Browserrefresh nötig sein um die Operationsergebnisse zu sehen sobald im DeviceOverview "state = done" angezeigt wird. +


    - Die zur Steuerung von sqlCmd relevanten Attribute sind:

    + Relevante Attribute sind:

    @@ -18431,226 +18848,248 @@ return; zu große Ergebnismenge (Anzahl Zeilen bzw. Readings) die Browsersesssion bzw. FHEMWEB blockieren.
    Wenn man sich unsicher ist, sollte man vorsorglich dem Statement ein Limit - hinzufügen.

    + hinzufügen. +

    +

  • -
  • sqlCmdHistory - Wenn mit dem Attribut sqlCmdHistoryLength aktiviert, kann - ein gespeichertes SQL-Statement aus einer Liste ausgewählt und ausgeführt werden. - Der SQL Cache wird beim Beenden von FHEM automatisch gesichert und beim Start des Systems wiederhergestellt. - Mit den nachfolgenden Einträgen werden spezielle Funktionen ausgeführt: -

    - - -
    - - Die zur Steuerung dieser Funktion relevante Attribute sind:

    - - -
    -
    - -

  • - -
  • sqlSpecial - Die Funktion bietet eine Drop-Downliste mit einer Auswahl vorbereiter Auswertungen - an.
    - Das Ergebnis des Statements wird im Reading "SqlResult" dargestellt. - Die Ergebnis-Formatierung kann durch das Attribut sqlResultFormat - ausgewählt, sowie der verwendete Feldtrenner durch das Attribut sqlResultFieldSep - festgelegt werden.

    - - Es sind die folgenden vordefinierte Auswertungen auswählbar:

    - -
    - - Die für diese Funktion relevanten Attribute sind:

    - - -


  • - -
  • sumValue [display | writeToDB | writeToDBSingle | writeToDBInTime] - - Berechnet die Summenwerte des Datenbankfelds "VALUE" in den Zeitgrenzen - der möglichen time.*-Attribute.

    - - Es muss das auszuwertende Reading im Attribut reading - angegeben sein. Diese Funktion ist sinnvoll wenn fortlaufend Wertedifferenzen eines - Readings in die Datenbank geschrieben werden.

    - - Ist keine oder die Option display angegeben, werden die Ergebnisse nur angezeigt. Mit - den Optionen writeToDB, writeToDBSingle bzw. writeToDBInTime werden die Berechnungsergebnisse - mit einem neuen Readingnamen in der Datenbank gespeichert.

    - - -
    - - Der neue Readingname wird aus einem Präfix und dem originalen Readingnamen gebildet, - wobei der originale Readingname durch das Attribut "readingNameMap" ersetzt werden kann. - Der Präfix setzt sich aus der Bildungsfunktion und der Aggregation zusammen.
    - Der Timestamp der neuen Readings in der Datenbank wird von der eingestellten Aggregationsperiode - abgeleitet, sofern kein eindeutiger Zeitpunkt des Ergebnisses bestimmt werden kann. - Das Feld "EVENT" wird mit "calculated" gefüllt.

    - - -
    - - Zusammengefasst sind die zur Steuerung dieser Funktion relevanten Attribute:

    - - -
    - -

  • - -
  • syncStandby <DbLog-Device Standby> - - Es werden die Datensätze aus der angeschlossenen Datenbank (Quelle) direkt in eine weitere - Datenbank (Standby-Datenbank) übertragen. - Dabei ist "<DbLog-Device Standby>" das DbLog-Device, welches mit der Standby-Datenbank - verbunden ist.

    - Es werden alle Datensätze übertragen, die durch das timestamp_begin Attribut - bzw. die Attribute "device", "reading" bestimmt sind.
    - Die Datensätze werden dabei in Zeitscheiben entsprechend der eingestellten Aggregation übertragen. - Hat das Attribut "aggregation" den Wert "no" oder "month", werden die Datensätze automatisch - in Tageszeitscheiben zur Standby-Datenbank übertragen. - Quell- und Standby-Datenbank können unterschiedlichen Typs sein. -

    - - Die zur Steuerung der syncStandby Funktion relevanten Attribute sind:

    - - -
    - -

  • + +
  • sqlCmdHistory

    + Wenn mit dem Attribut sqlCmdHistoryLength aktiviert, kann + ein gespeichertes SQL-Statement aus einer Liste ausgewählt und ausgeführt werden.
    + Der SQL Cache wird beim Beenden von FHEM automatisch gesichert und beim Start des Systems wiederhergestellt.
    + Mit den nachfolgenden Einträgen werden spezielle Funktionen ausgeführt:
    +

    + + +
    + + Relevante Attribute sind:
    + + +
    + +
  • +
    + + +
  • sqlSpecial

    + + Die Funktion bietet eine Drop-Downliste mit einer Auswahl vorbereiteter Auswertungen + an.
    + Das Ergebnis des Statements wird im Reading "SqlResult" dargestellt. + Die Ergebnis-Formatierung kann durch das Attribut sqlResultFormat + ausgewählt, sowie der verwendete Feldtrenner durch das Attribut sqlResultFieldSep + festgelegt werden.

    + + +
    + + Relevante Attribute sind:
    + + +
    +
    + +
  • +
    + +
  • sumValue [display | writeToDB | writeToDBSingle | writeToDBInTime]

    + + Berechnet die Summenwerte des Datenbankfelds "VALUE" in den Zeitgrenzen + der möglichen time.*-Attribute.

    + + Es muss das auszuwertende Reading im Attribut reading + angegeben sein.
    + Diese Funktion ist sinnvoll wenn fortlaufend Wertedifferenzen eines + Readings in die Datenbank geschrieben werden.

    + + Ist keine oder die Option display angegeben, werden die Ergebnisse nur angezeigt.
    + Mit den Optionen writeToDB, writeToDBSingle bzw. writeToDBInTime werden die + Berechnungsergebnisse mit einem neuen Readingnamen in der Datenbank gespeichert.

    + + +
    + + Der neue Readingname wird aus einem Präfix und dem originalen Readingnamen gebildet,
    + wobei der originale Readingname durch das Attribut "readingNameMap" ersetzt werden kann.
    + Der Präfix setzt sich aus der Bildungsfunktion und der Aggregation zusammen.
    + Der Timestamp der neuen Readings in der Datenbank wird von der eingestellten Aggregationsperiode abgeleitet,
    + sofern kein eindeutiger Zeitpunkt des Ergebnisses bestimmt werden kann. + Das Feld "EVENT" wird mit "calculated" gefüllt.

    + + +
    + + Relevante Attribute sind:
    + + +
    +
    + +
  • +
    + +
  • syncStandby <DbLog-Device Standby>

    + + Es werden die Datensätze aus der angeschlossenen Datenbank (Quelle) direkt in eine weitere + Datenbank (Standby-Datenbank) übertragen. + Dabei ist "<DbLog-Device Standby>" das DbLog-Device, welches mit der Standby-Datenbank + verbunden ist.

    + Es werden alle Datensätze übertragen, die durch das timestamp_begin Attribut + bzw. die Attribute "device", "reading" bestimmt sind.
    + Die Datensätze werden dabei in Zeitscheiben entsprechend der eingestellten Aggregation übertragen. + Hat das Attribut "aggregation" den Wert "no" oder "month", werden die Datensätze automatisch + in Tageszeitscheiben zur Standby-Datenbank übertragen. + Quell- und Standby-Datenbank können unterschiedlichen Typs sein. +

    + + Relevante Attribute sind:
    + + +
    +
    + +
  • +
    + -
  • tableCurrentFillup - Die current-Tabelle wird mit einem Extrakt der history-Tabelle aufgefüllt. - Die Attribute zur Zeiteinschränkung bzw. device, reading werden ausgewertet. - Dadurch kann der Inhalt des Extrakts beeinflusst werden. Im zugehörigen DbLog-Device sollte sollte das Attribut - "DbLogType=SampleFill/History" gesetzt sein. -
    -
    - - Für diese Funktion sind folgende Attribute relevant:

    +
  • tableCurrentFillup

    + + Die current-Tabelle wird mit einem Extrakt der history-Tabelle aufgefüllt.
    + Die Attribute zur Zeiteinschränkung bzw. device, reading werden ausgewertet.
    + Dadurch kann der Inhalt des Extrakts beeinflusst werden.
    + Im zugehörigen DbLog-Device sollte das Attribut "DbLogType=SampleFill/History" gesetzt sein. +
    +
    - -
    -
    + Relevante Attribute sind:
    + + +
    +
    + +
  • - - -
  • tableCurrentPurge - löscht den Inhalt der current-Tabelle. Es werden keine Limitierungen, z.B. durch die Attribute "timestamp_begin", - "timestamp_end", device, reading, usw. , ausgewertet. -
    -
    +
  • tableCurrentPurge

    + + Löscht den Inhalt der current-Tabelle.
    + Es werden keine Limitierungen, z.B. durch die Attribute timestamp_begin, + timestamp_end, device oder reading ausgewertet. +
    +
    - Für diese Funktion sind folgende Attribute relevant:

    + Relevante Attribute sind:
    - -
    -
    + +
    +
    -
  • + -
  • vacuum

    - +
  • vacuum

    + Optimiert die Tabellen in der angeschlossenen Datenbank (SQLite, PostgreSQL).
    - Insbesondere für SQLite Datenbanken ist unbedingt empfehlenswert die Verbindung des relevanten DbLog-Devices zur + Insbesondere für SQLite Datenbanken ist es unbedingt empfehlenswert, die Verbindung des relevanten DbLog-Devices zur Datenbank vorübergehend zu schließen (siehe DbLog reopen Kommando).

    - - Für diese Funktion sind folgende Attribute relevant:

    + + Relevante Attribute sind:

    @@ -18658,7 +19097,7 @@ return; Bei der Ausführung des vacuum Kommandos wird bei SQLite Datenbanken automatisch das PRAGMA auto_vacuum = FULL angewendet.
    Das vacuum Kommando erfordert zusätzlichen temporären Speicherplatz. Sollte der Platz im Standard TMPDIR Verzeichnis - nicht ausreichen, kann SQLite durch setzen der Umgebungsvariable SQLITE_TMPDIR ein ausreichend großes Verzeichnis + nicht ausreichen, kann SQLite durch setzen der Umgebungsvariable SQLITE_TMPDIR ein ausreichend großes Verzeichnis zugewiesen werden.
    (siehe: www.sqlite.org/tempfiles)
  • @@ -18669,21 +19108,6 @@ return; - Für alle Auswertungsvarianten (Ausnahme sqlCmd,deviceRename,readingRename) gilt:
    - Zusätzlich zu dem auszuwertenden Reading kann das Device mit angegeben werden um das Reporting nach diesen Kriterien einzuschränken. - Sind keine Zeitgrenzen-Attribute angegeben jedoch das Aggregations-Attribut gesetzt, wird der Zeitstempel des ältesten - Datensatzes in der Datenbank als Startdatum und das aktuelle Datum/die aktuelle Zeit als Zeitgrenze genutzt. - Konnte der älteste Datensatz in der Datenbank nicht ermittelt werden, wird '1970-01-01 01:00:00' als Selektionsstart - genutzt (siehe get <name> minTimestamp). - Sind weder Zeitgrenzen-Attribute noch Aggregation angegeben, wird die Datenselektion ohne Timestamp-Einschränkungen - ausgeführt. -

    - - Hinweis:
    - - In der Detailansicht kann ein Browserrefresh nötig sein um die Operationsergebnisse zu sehen sobald im DeviceOverview "state = done" angezeigt wird. -

    - @@ -19008,25 +19432,41 @@ sub dbval { -
  • diffAccept - gilt für Funktion diffValue. diffAccept legt fest bis zu welchem Schwellenwert eine berechnete positive Werte-Differenz - zwischen zwei unmittelbar aufeinander folgenden Datensätzen akzeptiert werden soll (Standard ist 20).
    - Damit werden fehlerhafte DB-Einträge mit einem unverhältnismäßig hohen Differenzwert von der Berechnung ausgeschlossen und - verfälschen nicht das Ergebnis. Sollten Schwellenwertüberschreitungen vorkommen, wird das Reading "diff_overrun_limit_<diffLimit>" - erstellt. (<diffLimit> wird dabei durch den aktuellen Attributwert ersetzt) - Es enthält eine Liste der relevanten Wertepaare. Mit verbose 3 werden diese Datensätze ebenfalls im Logfile protokolliert. -

    +
  • diffAccept [+-]<Schwellenwert>

    + + diffAccept legt für die Funktion diffValue fest, bis zu welchem <Schwellenwert> eine + Werte-Differenz zwischen zwei unmittelbar aufeinander folgenden Datensätzen akzeptiert wird.
    + Wird dem Schwellenwert +- (optional) vorangestellt, werden sowohl positive als auch negative Differenzen + ausgewertet. +

    + + (default: 20, nur positive Differenzen zwischen Vorgänger und Nachfolger) +

    + + +
    + + Bei Schwellenwertüberschreitungen wird das Reading diff_overrun_limit_<Schwellenwert> + erstellt.
    + Es enthält eine Liste der relevanten Wertepaare. Mit verbose 3 werden diese Datensätze ebenfalls im Logfile protokolliert. +

    - + +
    +
  • dumpComment - User-Kommentar. Er wird im Kopf des durch den Befehl "dumpMyQL clientSide" erzeugten Dumpfiles @@ -19037,7 +19477,7 @@ sub dbval {
  • dumpDirLocal

    - +