diff --git a/fhem/CHANGED b/fhem/CHANGED index a128e013e..36dfbdcbf 100644 --- a/fhem/CHANGED +++ b/fhem/CHANGED @@ -1,5 +1,6 @@ # Add changes at the top of the list. Keep it in ASCII, and 80-char wide. # Do not insert empty lines here, update check depends on it. + - change: 93_DbLog: revise commandref - bugfix: 98_vitoconnect: Missing update interval after login failure - bugfix: 73_km200 : Forward declaration made obsolete - bugfix: 73_ElectricityCalculator : 2nd DST bug, WFR Reading deleted diff --git a/fhem/FHEM/93_DbLog.pm b/fhem/FHEM/93_DbLog.pm index 81ee3fe7f..089a75a5b 100644 --- a/fhem/FHEM/93_DbLog.pm +++ b/fhem/FHEM/93_DbLog.pm @@ -14,6 +14,10 @@ # reduceLog() created by Claudiu Schuster (rapster) # ############################################################################################################################################ +# +# Leerzeichen entfernen: sed -i 's/[[:space:]]*$//' 93_DbLog.pm +# +######################################################################################################################### package main; use strict; @@ -27,10 +31,11 @@ use Time::HiRes qw(gettimeofday tv_interval); use Time::Local; use Encode qw(encode_utf8); use HttpUtils; -no if $] >= 5.017011, warnings => 'experimental::smartmatch'; +no if $] >= 5.017011, warnings => 'experimental::smartmatch'; # Version History intern by DS_Starter: my %DbLog_vNotesIntern = ( + "4.13.3" => "26.11.2022 revise commandref ", "4.13.2" => "06.11.2022 Patch Delta calculation (delta-d,delta-h) https://forum.fhem.de/index.php/topic,129975.msg1242272.html#msg1242272 ", "4.13.1" => "16.10.2022 edit commandref ", "4.13.0" => "15.04.2022 new Attr convertTimezone, minor fixes in reduceLog(NbL) ", @@ -115,7 +120,7 @@ my %DbLog_vNotesIntern = ( "3.8.5" => "16.02.2018 changed ParseEvent for Zwave ", "3.8.4" => "07.02.2018 minor fixes of \"\$\@\", code review, eval for userCommand, DbLog_ExecSQL1 (forum:#83973) ", "3.8.3" => "03.02.2018 call execmemcache only syncInterval/2 if 
cacheLimit reached and DB is not reachable, fix handling of ". - "\"\$\@\" in DbLog_PushAsync ", + "\"\$\@\" in DbLog_PushAsync ", "3.8.2" => "31.01.2018 RaiseError => 1 in DbLog_ConnectPush, DbLog_ConnectNewDBH, configCheck improved ", "3.8.1" => "29.01.2018 Use of uninitialized value \$txt if addlog has no value ", "3.8.0" => "26.01.2018 escape \"\|\" in events to log events containing it ", @@ -133,7 +138,7 @@ my %DbLog_vNotesIntern = ( "3.3.0" => "07.12.2017 avoid print out the content of cache by \"list device\" ", "3.2.0" => "06.12.2017 change attribute \"autocommit\" to \"commitMode\", activate choice of autocommit/transaction in logging ". "Addlog/addCacheLine change \$TIMESTAMP check ". - "rebuild DbLog_Push/DbLog_PushAsync due to bugfix in update current (Forum:#80519) ". + "rebuild DbLog_Push/DbLog_PushAsync due to bugfix in update current (Forum:#80519) ". "new attribute \"useCharfilter\" for Characterfilter usage ", "3.1.1" => "05.12.2017 Characterfilter added to avoid unwanted characters what may destroy transaction ", "3.1.0" => "05.12.2017 new set command addCacheLine ", @@ -259,7 +264,7 @@ my %DbLog_columns = ("DEVICE" => 64, "VALUE" => 128, "UNIT" => 32 ); - + my $dblog_cachedef = 500; # default Größe cacheLimit bei asynchronen Betrieb ################################################################ @@ -310,7 +315,7 @@ sub DbLog_Initialize { "useCharfilter:0,1 ". "valueFn:textField-long ". "verbose4Devs ". 
- $readingFnAttributes; + $readingFnAttributes; addToAttrList("DbLogInclude"); addToAttrList("DbLogExclude"); @@ -318,9 +323,9 @@ sub DbLog_Initialize { $hash->{FW_detailFn} = "DbLog_fhemwebFn"; $hash->{SVG_sampleDataFn} = "DbLog_sampleDataFn"; - + eval { FHEM::Meta::InitMod( __FILE__, $hash ) }; # für Meta.pm (https://forum.fhem.de/index.php/topic,97589.0.html) - + return; } @@ -329,20 +334,20 @@ sub DbLog_Define { my ($hash, $def) = @_; my $name = $hash->{NAME}; my @a = split "[ \t][ \t]*", $def; - + if($DbLogMMDBI) { Log3($name, 1, "DbLog $name - ERROR - Perl module ".$DbLogMMDBI." is missing. DbLog module is not loaded ! On Debian systems you can install it with \"sudo apt-get install libdbi-perl\" "); return "Error: Perl module ".$DbLogMMDBI." is missing. Install it on Debian with: sudo apt-get install libdbi-perl"; - } + } return "wrong syntax: define DbLog configuration regexp" if(int(@a) != 4); - + $hash->{CONFIGURATION} = $a[2]; my $regexp = $a[3]; eval { "Hallo" =~ m/^$regexp$/ }; return "Bad regexp: $@" if($@); - + $hash->{REGEXP} = $regexp; $hash->{MODE} = AttrVal($hash->{NAME}, "asyncMode", undef)?"asynchronous":"synchronous"; # Mode setzen Forum:#76213 $hash->{HELPER}{OLDSTATE} = "initialized"; @@ -352,13 +357,13 @@ sub DbLog_Define { # Versionsinformationen setzen DbLog_setVersionInfo($hash); - + # nur Events dieser Devices an NotifyFn weiterleiten, NOTIFYDEV wird gesetzt wenn möglich notifyRegexpChanged($hash, $regexp); - + #remember PID for plotfork $hash->{PID} = $$; - + # CacheIndex für Events zum asynchronen Schreiben in DB $data{DbLog}{$name}{cache}{index} = 0; @@ -367,9 +372,9 @@ sub DbLog_Define { if ($ret) { # return on error while reading configuration Log3($name, 1, "DbLog $name - Error while reading $hash->{CONFIGURATION}: '$ret' "); - return $ret; + return $ret; } - + # set used COLUMNS InternalTimer(gettimeofday()+2, "DbLog_setinternalcols", $hash, 0); @@ -378,7 +383,7 @@ sub DbLog_Define { # initial execution of DbLog_execmemcache 
DbLog_execmemcache($hash); - + return; } @@ -387,34 +392,34 @@ sub DbLog_Undef { my $hash = shift; my $name = shift; my $dbh = $hash->{DBHP}; - + BlockingKill($hash->{HELPER}{".RUNNING_PID"}) if($hash->{HELPER}{".RUNNING_PID"}); BlockingKill($hash->{HELPER}{REDUCELOG_PID}) if($hash->{HELPER}{REDUCELOG_PID}); BlockingKill($hash->{HELPER}{COUNT_PID}) if($hash->{HELPER}{COUNT_PID}); BlockingKill($hash->{HELPER}{DELDAYS_PID}) if($hash->{HELPER}{DELDAYS_PID}); - + $dbh->disconnect() if(defined($dbh)); - + RemoveInternalTimer($hash); delete $data{DbLog}{$name}; - + return; } ####################################################################################################### -# Mit der X_DelayedShutdown Funktion kann eine Definition das Stoppen von FHEM verzögern um asynchron -# hinter sich aufzuräumen. +# Mit der X_DelayedShutdown Funktion kann eine Definition das Stoppen von FHEM verzögern um asynchron +# hinter sich aufzuräumen. # Je nach Rückgabewert $delay_needed wird der Stopp von FHEM verzögert (0 | 1). -# Sobald alle nötigen Maßnahmen erledigt sind, muss der Abschluss mit CancelDelayedShutdown($name) an -# FHEM zurückgemeldet werden. +# Sobald alle nötigen Maßnahmen erledigt sind, muss der Abschluss mit CancelDelayedShutdown($name) an +# FHEM zurückgemeldet werden. 
####################################################################################################### sub DbLog_DelayedShutdown { my $hash = shift; my $name = $hash->{NAME}; my $async = AttrVal($name, "asyncMode", ""); - + return 0 if(IsDisabled($name)); - + $hash->{HELPER}{SHUTDOWNSEQ} = 1; # return 0 if(!$async && !$hash->{HELPER}{PUSHISRUNNING}); Log3($name, 2, "DbLog $name - Last database write cycle due to shutdown ..."); @@ -436,18 +441,18 @@ sub DbLog_Attr { my $do = 0; if($cmd eq "set") { - if ($aName eq "syncInterval" || - $aName eq "cacheLimit" || + if ($aName eq "syncInterval" || + $aName eq "cacheLimit" || $aName eq "cacheOverflowThreshold" || - $aName eq "SQLiteCacheSize" || - $aName eq "timeout") { + $aName eq "SQLiteCacheSize" || + $aName eq "timeout") { if ($aVal !~ /^[0-9]+$/) { return "The Value of $aName is not valid. Use only figures 0-9 !";} } - + if ($hash->{MODEL} !~ /MYSQL|POSTGRESQL/ && $aName =~ /dbSchema/) { return "\"$aName\" is not valid for database model \"$hash->{MODEL}\""; } - + if( $aName eq 'valueFn' ) { my %specials= ( "%TIMESTAMP" => $name, @@ -468,33 +473,33 @@ sub DbLog_Attr { if ($aName eq "shutdownWait") { return "DbLog $name - The attribute $aName is deprecated and has been removed !"; - } + } - if ($aName eq "SQLiteCacheSize" || $aName eq "SQLiteJournalMode") { + if ($aName eq "SQLiteCacheSize" || $aName eq "SQLiteJournalMode") { InternalTimer(gettimeofday()+1.0, "DbLog_attrForSQLite", $hash, 0); InternalTimer(gettimeofday()+1.5, "DbLog_attrForSQLite", $hash, 0); # muß zweimal ausgeführt werden - Grund unbekannt :-( } if ($aName eq "convertTimezone") { return "The library FHEM::Utility::CTZ is missed. Please update FHEM completely." 
if($ctzAbsent); - + my $rmf = reqModFail(); return "You have to install the required perl module: ".$rmf if($rmf); - } + } } - + if($aName eq "colEvent" || $aName eq "colReading" || $aName eq "colValue") { if ($cmd eq "set" && $aVal) { unless ($aVal =~ /^[0-9]+$/) { return " The Value of $aName is not valid. Use only figures 0-9 !";} } InternalTimer(gettimeofday()+0.5, "DbLog_setinternalcols", $hash, 0); } - + if($aName eq "asyncMode") { if ($cmd eq "set" && $aVal) { $hash->{MODE} = "asynchronous"; InternalTimer(gettimeofday()+2, "DbLog_execmemcache", $hash, 0); - } + } else { $hash->{MODE} = "synchronous"; delete($defs{$name}{READINGS}{NextSync}); @@ -505,37 +510,37 @@ sub DbLog_Attr { InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); } } - + if($aName eq "commitMode") { if ($dbh) { $dbh->commit() if(!$dbh->{AutoCommit}); $dbh->disconnect(); } } - + if($aName eq "showproctime") { if ($cmd ne "set" || !$aVal) { delete($defs{$name}{READINGS}{background_processing_time}); delete($defs{$name}{READINGS}{sql_processing_time}); } } - + if($aName eq "showNotifyTime") { if ($cmd ne "set" || !$aVal) { delete($defs{$name}{READINGS}{notify_processing_time}); } } - + if($aName eq "noNotifyDev") { my $regexp = $hash->{REGEXP}; if ($cmd eq "set" && $aVal) { delete($hash->{NOTIFYDEV}); - } + } else { - notifyRegexpChanged($hash, $regexp); + notifyRegexpChanged($hash, $regexp); } } - + if ($aName eq "disable") { my $async = AttrVal($name, "asyncMode", 0); if($cmd eq "set") { @@ -543,18 +548,18 @@ sub DbLog_Attr { } $do = 0 if($cmd eq "del"); my $val = ($do == 1 ? 
"disabled" : "active"); - + # letzter CacheSync vor disablen DbLog_execmemcache($hash) if($do == 1); - + DbLog_setReadingstate ($hash, $val); - + if ($do == 0) { InternalTimer(gettimeofday()+2, "DbLog_execmemcache", $hash, 0) if($async); InternalTimer(gettimeofday()+2, "DbLog_ConnectPush", $hash, 0) if(!$async); } } - + if ($aName eq "traceHandles") { if($cmd eq "set") { unless ($aVal =~ /^[0-9]+$/) {return " The Value of $aName is not valid. Use only figures 0-9 without decimal places !";} @@ -564,24 +569,24 @@ sub DbLog_Attr { $do = ($aVal) ? 1 : 0; } $do = 0 if($cmd eq "del"); - if($do) { + if($do) { InternalTimer(gettimeofday()+5, "DbLog_startShowChildhandles", "$name:Main", 0); } } - + if ($aName eq "dbSchema") { if($cmd eq "set") { $do = ($aVal) ? 1 : 0; } $do = 0 if($cmd eq "del"); - + if ($do == 1) { - $hash->{HELPER}{TH} = $aVal.".history"; - $hash->{HELPER}{TC} = $aVal.".current"; - } + $hash->{HELPER}{TH} = $aVal.".history"; + $hash->{HELPER}{TC} = $aVal.".current"; + } else { - $hash->{HELPER}{TH} = "history"; - $hash->{HELPER}{TC} = "current"; + $hash->{HELPER}{TH} = "history"; + $hash->{HELPER}{TC} = "current"; } } @@ -595,20 +600,20 @@ sub DbLog_attrForSQLite { my $hash = shift; return if($hash->{MODEL} ne "SQLITE"); - + my $name = $hash->{NAME}; - + my $dbh = $hash->{DBHP}; if ($dbh) { my $history = $hash->{HELPER}{TH}; if(!$dbh->{AutoCommit}) { - eval {$dbh->commit()} or Log3($name, 2, "DbLog $name -> Error commit $history - $@"); - } + eval {$dbh->commit()} or Log3($name, 2, "DbLog $name -> Error commit $history - $@"); + } $dbh->disconnect(); } - DbLog_ConnectPush ($hash,1); - -return; + DbLog_ConnectPush ($hash,1); + +return; } ################################################################ @@ -616,7 +621,7 @@ sub DbLog_Set { my ($hash, @a) = @_; my $name = $hash->{NAME}; my $async = AttrVal($name, "asyncMode", undef); - + my $usage = "Unknown argument, choose one of ". "reduceLog ". "reduceLogNbl ". 
@@ -628,28 +633,28 @@ sub DbLog_Set { "deleteOldDays ". "deleteOldDaysNbl ". "userCommand ". - "clearReadings:noArg ". + "clearReadings:noArg ". "eraseReadings:noArg ". "addLog " ; - - if (AttrVal($name, "asyncMode", undef)) { + + if (AttrVal($name, "asyncMode", undef)) { $usage .= "listCache:noArg ". "addCacheLine ". "purgeCache:noArg ". "commitCache:noArg ". - "exportCache:nopurge,purgecache " + "exportCache:nopurge,purgecache " ; } - + my $history = $hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; my (@logs,$dir); - + my $dirdef = AttrVal("global", "logdir", $attr{global}{modpath}."/log/"); $dir = AttrVal($name, "expimpdir", $dirdef); $dir = $dir."/" if($dir !~ /.*\/$/); - + opendir(DIR,$dir); my $sd = "cache_".$name."_"; while (my $file = readdir(DIR)) { @@ -660,17 +665,17 @@ sub DbLog_Set { closedir(DIR); my $cj = ""; $cj = join(",",reverse(sort @logs)) if (@logs); - + if (@logs) { $usage .= "importCachefile:".$cj." "; - } + } else { $usage .= "importCachefile "; } - + return $usage if(int(@a) < 2); my $dbh = $hash->{DBHP}; - my $db = (split(/;|=/, $hash->{dbconn}))[1]; + my $db = (split(/;|=/, $hash->{dbconn}))[1]; my $ret; if ($a[1] eq 'reduceLog') { @@ -683,7 +688,7 @@ sub DbLog_Set { if (defined $a[2] && $a[2] =~ /(^\d+$)|(^\d+:\d+$)/) { $ret = DbLog_reduceLog($hash,@a); InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); - } + } else { Log3($name, 1, "DbLog $name: reduceLog error, no given."); $ret = "reduceLog error, no given."; @@ -697,9 +702,9 @@ sub DbLog_Set { return "ReduceLogNbl syntax error in set command. Please see commandref for help."; } if (defined $a[2] && $a[2] =~ /(^\d+$)|(^\d+:\d+$)/) { - if ($hash->{HELPER}{REDUCELOG_PID} && $hash->{HELPER}{REDUCELOG_PID}{pid} !~ m/DEAD/) { + if ($hash->{HELPER}{REDUCELOG_PID} && $hash->{HELPER}{REDUCELOG_PID}{pid} !~ m/DEAD/) { $ret = "reduceLogNbl already in progress. 
Please wait until the running process is finished."; - } + } else { delete $hash->{HELPER}{REDUCELOG_PID}; my @b = @a; @@ -709,26 +714,26 @@ sub DbLog_Set { $hash->{HELPER}{REDUCELOG_PID} = BlockingCall("DbLog_reduceLogNbl","$name","DbLog_reduceLogNbl_finished"); return; } - } + } else { Log3($name, 1, "DbLog $name: reduceLogNbl syntax error, no [:] given."); $ret = "reduceLogNbl error, no given."; } } - elsif ($a[1] eq 'clearReadings') { + elsif ($a[1] eq 'clearReadings') { my @allrds = keys%{$defs{$name}{READINGS}}; foreach my $key(@allrds) { next if($key =~ m/state/ || $key =~ m/CacheUsage/ || $key =~ m/NextSync/); readingsSingleUpdate($hash,$key," ",0); } } - elsif ($a[1] eq 'eraseReadings') { + elsif ($a[1] eq 'eraseReadings') { my @allrds = keys%{$defs{$name}{READINGS}}; foreach my $key(@allrds) { delete($defs{$name}{READINGS}{$key}) if($key !~ m/^state$/); } - } - elsif ($a[1] eq 'addLog') { + } + elsif ($a[1] eq 'addLog') { unless ($a[2]) { return "The argument of $a[1] is not valid. Please check commandref.";} my $nce = ("\!useExcludes" ~~ @a)?1:0; map(s/\!useExcludes//g, @a); @@ -743,12 +748,12 @@ sub DbLog_Set { return undef,$skip_trigger; } elsif ($a[1] eq 'reopen') { - return if(IsDisabled($name)); + return if(IsDisabled($name)); if ($dbh) { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; if ($@) { Log3($name, 2, "DbLog $name -> Error commit $history - $@"); - } + } $dbh->disconnect(); } if (!$a[2]) { @@ -761,32 +766,32 @@ sub DbLog_Set { } DbLog_execmemcache($hash) if($async); $ret = "Reopen executed."; - } + } else { unless ($a[2] =~ /^[0-9]+$/) { return " The Value of $a[1]-time is not valid. 
Use only figures 0-9 !";} # Statusbit "Kein Schreiben in DB erlauben" wenn reopen mit Zeitangabe $hash->{HELPER}{REOPEN_RUNS} = $a[2]; - + # falls ein hängender Prozess vorhanden ist -> löschen BlockingKill($hash->{HELPER}{".RUNNING_PID"}) if($hash->{HELPER}{".RUNNING_PID"}); BlockingKill($hash->{HELPER}{REDUCELOG_PID}) if($hash->{HELPER}{REDUCELOG_PID}); BlockingKill($hash->{HELPER}{COUNT_PID}) if($hash->{HELPER}{COUNT_PID}); BlockingKill($hash->{HELPER}{DELDAYS_PID}) if($hash->{HELPER}{DELDAYS_PID}); - delete $hash->{HELPER}{".RUNNING_PID"}; + delete $hash->{HELPER}{".RUNNING_PID"}; delete $hash->{HELPER}{COUNT_PID}; delete $hash->{HELPER}{DELDAYS_PID}; delete $hash->{HELPER}{REDUCELOG_PID}; - + my $ts = (split(" ",FmtDateTime(gettimeofday()+$a[2])))[1]; Log3($name, 2, "DbLog $name: Connection closed until $ts ($a[2] seconds)."); readingsSingleUpdate($hash, "state", "closed until $ts ($a[2] seconds)", 1); InternalTimer(gettimeofday()+$a[2], "DbLog_reopen", $hash, 0); - $hash->{HELPER}{REOPEN_RUNS_UNTIL} = $ts; + $hash->{HELPER}{REOPEN_RUNS_UNTIL} = $ts; } } elsif ($a[1] eq 'rereadcfg') { Log3($name, 3, "DbLog $name: Rereadcfg requested."); - + if ($dbh) { $dbh->commit() if(!$dbh->{AutoCommit}); $dbh->disconnect(); @@ -798,15 +803,15 @@ sub DbLog_Set { } elsif ($a[1] eq 'purgeCache') { delete $data{DbLog}{$name}{cache}; - readingsSingleUpdate($hash, 'CacheUsage', 0, 1); + readingsSingleUpdate($hash, 'CacheUsage', 0, 1); } elsif ($a[1] eq 'commitCache') { - DbLog_execmemcache($hash); + DbLog_execmemcache($hash); } elsif ($a[1] eq 'listCache') { my $cache; - foreach my $key (sort{$a <=>$b}keys %{$data{DbLog}{$name}{cache}{memcache}}) { - $cache .= $key." => ".$data{DbLog}{$name}{cache}{memcache}{$key}."\n"; + foreach my $key (sort{$a <=>$b}keys %{$data{DbLog}{$name}{cache}{memcache}}) { + $cache .= $key." 
=> ".$data{DbLog}{$name}{cache}{memcache}{$key}."\n"; } return $cache; } @@ -823,20 +828,20 @@ sub DbLog_Set { } chop($aa); #letztes Leerzeichen entfernen $aa = DbLog_charfilter($aa) if(AttrVal($name, "useCharfilter",0)); - - my ($i_timestamp, $i_dev, $i_type, $i_evt, $i_reading, $i_val, $i_unit) = split("\\|",$aa); + + my ($i_timestamp, $i_dev, $i_type, $i_evt, $i_reading, $i_val, $i_unit) = split("\\|",$aa); if($i_timestamp !~ /^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})$/ || !$i_dev || !$i_reading) { return "Syntax error in set $a[1] command. Use this line format: YYYY-MM-DD HH:MM:SS||||||[] "; - } - my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($i_timestamp =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); + } + my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($i_timestamp =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); eval { my $ts = timelocal($sec, $min, $hh, $dd, $mm-1, $yyyy-1900); }; - + if ($@) { my @l = split (/at/, $@); - return " Timestamp is out of range - $l[0]"; - } + return " Timestamp is out of range - $l[0]"; + } DbLog_addCacheLine($hash,$i_timestamp,$i_dev,$i_type,$i_evt,$i_reading,$i_val,$i_unit); - + } elsif ($a[1] eq 'configCheck') { my $check = DbLog_configcheck($hash); @@ -846,46 +851,46 @@ sub DbLog_Set { my $cln; my $crows = 0; my $now = strftime('%Y-%m-%d_%H-%M-%S',localtime); - + my ($out,$outfile,$error); - - return "device is disabled" if(IsDisabled($name)); + + return "device is disabled" if(IsDisabled($name)); return "device not in asynch working mode" if(!AttrVal($name, "asyncMode", undef)); - + if(@logs && AttrVal($name, "exportCacheAppend", 0)) { # exportiertes Cachefile existiert und es soll an das neueste angehängt werden $outfile = $dir.pop(@logs); $out = ">>$outfile"; - } + } else { $outfile = $dir."cache_".$name."_".$now; $out = ">$outfile"; } - + if(open(FH, $out)) { binmode (FH); - } + } else { readingsSingleUpdate($hash, "lastCachefile", $outfile." 
- Error - ".$!, 1); $error = "could not open ".$outfile.": ".$!; } - + if(!$error) { for my $key (sort(keys %{$data{DbLog}{$name}{cache}{memcache}})) { $cln = $data{DbLog}{$name}{cache}{memcache}{$key}."\n"; print FH $cln ; - $crows++; + $crows++; } close(FH); readingsSingleUpdate($hash, "lastCachefile", $outfile." (".$crows." cache rows exported)", 1); } - + my $state = $error // $hash->{HELPER}{OLDSTATE}; DbLog_setReadingstate ($hash, $state); - + return $error if($error); - + Log3($name, 3, "DbLog $name: $crows cache rows exported to $outfile."); - + if (lc($a[-1]) =~ m/^purgecache/i) { delete $data{DbLog}{$name}{cache}; readingsSingleUpdate($hash, 'CacheUsage', 0, 1); @@ -899,23 +904,23 @@ sub DbLog_Set { my $infile; my @row_array; readingsSingleUpdate($hash, "lastCachefile", "", 0); - + return if(IsDisabled($name) || $hash->{HELPER}{REOPEN_RUNS}); # return wenn "reopen" mit Ablaufzeit gestartet ist oder disabled - + if (!$a[2]) { return "Wrong function-call. Use set importCachefile without directory (see attr expimpdir)." ; - } + } else { $infile = $dir.$a[2]; } - + if (open(FH, "$infile")) { binmode (FH); - } + } else { return "could not open ".$infile.": ".$!; } - + while () { my $row = $_; $row = DbLog_charfilter($row) if(AttrVal($name, "useCharfilter",0)); @@ -923,14 +928,14 @@ sub DbLog_Set { $crows++; } close(FH); - + if(@row_array) { my $error = DbLog_Push($hash, 1, @row_array); if($error) { readingsSingleUpdate ($hash, "lastCachefile", $infile." - Error - ".$!, 1); DbLog_setReadingstate ($hash, $error); Log3 $name, 5, "DbLog $name -> DbLog_Push Returncode: $error"; - } + } else { unless(rename($dir.$a[2], $dir."impdone_".$a[2])) { Log3($name, 2, "DbLog $name: cachefile $infile couldn't be renamed after import !"); @@ -939,22 +944,22 @@ sub DbLog_Set { DbLog_setReadingstate ($hash, $crows." 
cache rows processed from ".$infile); Log3($name, 3, "DbLog $name: $crows cache rows processed from $infile."); } - } + } else { DbLog_setReadingstate ($hash, "no rows in ".$infile); Log3($name, 3, "DbLog $name: $infile doesn't contain any rows - no imports done."); } - + return; } elsif ($a[1] eq 'count') { Log3($name, 2, qq{DbLog $name - WARNING - "$a[1]" is outdated. Please consider use of DbRep "set countEntries" instead.}); $dbh = DbLog_ConnectNewDBH($hash); - + if(!$dbh) { Log3($name, 1, "DbLog $name: DBLog_Set - count - DB connect not possible"); return; - } + } else { Log3($name, 4, "DbLog $name: Records count requested."); my $c = $dbh->selectrow_array("SELECT count(*) FROM $history"); @@ -962,34 +967,34 @@ sub DbLog_Set { $c = $dbh->selectrow_array("SELECT count(*) FROM $current"); readingsSingleUpdate($hash, 'countCurrent', $c ,1); $dbh->disconnect(); - - InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); + + InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); } } elsif ($a[1] eq 'countNbl') { Log3($name, 2, qq{DbLog $name - WARNING - "$a[1]" is outdated. Please consider use of DbRep "set countEntries" instead.}); - if ($hash->{HELPER}{COUNT_PID} && $hash->{HELPER}{COUNT_PID}{pid} !~ m/DEAD/){ + if ($hash->{HELPER}{COUNT_PID} && $hash->{HELPER}{COUNT_PID}{pid} !~ m/DEAD/){ $ret = "DbLog count already in progress. Please wait until the running process is finished."; - } + } else { delete $hash->{HELPER}{COUNT_PID}; $hash->{HELPER}{COUNT_PID} = BlockingCall("DbLog_countNbl","$name","DbLog_countNbl_finished"); return; - } + } } elsif ($a[1] eq 'deleteOldDays') { Log3($name, 2, qq{DbLog $name - WARNING - "$a[1]" is outdated. 
Please consider use of DbRep "set delEntries" instead.}); Log3 ($name, 3, "DbLog $name -> Deletion of records older than $a[2] days in database $db requested"); my ($c, $cmd); - + $dbh = DbLog_ConnectNewDBH($hash); if(!$dbh) { Log3($name, 1, "DbLog $name: DBLog_Set - deleteOldDays - DB connect not possible"); return; - } + } else { $cmd = "delete from $history where TIMESTAMP < "; - + if ($hash->{MODEL} eq 'SQLITE') { $cmd .= "datetime('now', '-$a[2] days')"; } elsif ($hash->{MODEL} eq 'MYSQL') { $cmd .= "DATE_SUB(CURDATE(),INTERVAL $a[2] DAY)"; } elsif ($hash->{MODEL} eq 'POSTGRESQL') { $cmd .= "NOW() - INTERVAL '$a[2]' DAY"; } @@ -1003,14 +1008,14 @@ sub DbLog_Set { Log3 ($name, 3, "DbLog $name -> deleteOldDays finished. $c entries of database $db deleted."); readingsSingleUpdate($hash, 'lastRowsDeleted', $c ,1); } - + InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); } } elsif ($a[1] eq 'deleteOldDaysNbl') { Log3($name, 2, qq{DbLog $name - WARNING - "$a[1]" is outdated. Please consider use of DbRep "set delEntries" instead.}); if (defined $a[2] && $a[2] =~ /^\d+$/) { - if ($hash->{HELPER}{DELDAYS_PID} && $hash->{HELPER}{DELDAYS_PID}{pid} !~ m/DEAD/) { + if ($hash->{HELPER}{DELDAYS_PID} && $hash->{HELPER}{DELDAYS_PID}{pid} !~ m/DEAD/) { $ret = "deleteOldDaysNbl already in progress. 
Please wait until the running process is finished."; } else { delete $hash->{HELPER}{DELDAYS_PID}; @@ -1019,7 +1024,7 @@ sub DbLog_Set { $hash->{HELPER}{DELDAYS_PID} = BlockingCall("DbLog_deldaysNbl","$name","DbLog_deldaysNbl_done"); return; } - } + } else { Log3($name, 1, "DbLog $name: deleteOldDaysNbl error, no given."); $ret = "deleteOldDaysNbl error, no given."; @@ -1031,7 +1036,7 @@ sub DbLog_Set { if(!$dbh) { Log3($name, 1, "DbLog $name: DBLog_Set - userCommand - DB connect not possible"); return; - } + } else { Log3($name, 4, "DbLog $name: userCommand execution requested."); my ($c, @cmd, $sql); @@ -1039,20 +1044,20 @@ sub DbLog_Set { shift(@cmd); shift(@cmd); $sql = join(" ",@cmd); readingsSingleUpdate($hash, 'userCommand', $sql, 1); - $dbh->{RaiseError} = 1; + $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; my $error; eval { $c = $dbh->selectrow_array($sql); }; if($@) { $error = $@; - Log3($name, 1, "DbLog $name: DBLog_Set - $error"); + Log3($name, 1, "DbLog $name: DBLog_Set - $error"); } - + my $res = $error?$error:(defined($c))?$c:"no result"; Log3($name, 4, "DbLog $name: DBLog_Set - userCommand - result: $res"); readingsSingleUpdate($hash, 'userCommandResult', $res ,1); $dbh->disconnect(); - + InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); } } @@ -1065,26 +1070,26 @@ return $ret; # # Exrahieren des Filters aus der ColumnsSpec (gplot-Datei) # -# Die grundlegend idee ist das jeder svg plot einen filter hat der angibt -# welches device und reading dargestellt wird so das der plot sich neu -# lädt wenn es ein entsprechendes event gibt. +# Die grundlegend idee ist das jeder svg plot einen filter hat der angibt +# welches device und reading dargestellt wird so das der plot sich neu +# lädt wenn es ein entsprechendes event gibt. # # Parameter: Quell-Instanz-Name, und alle FileLog-Parameter, die diese Instanz betreffen. 
# Quelle: http://forum.fhem.de/index.php/topic,40176.msg325200.html#msg325200 ############################################################################################### -sub DbLog_regexpFn { +sub DbLog_regexpFn { my ($name, $filter) = @_; my $ret; - + my @a = split( ' ', $filter ); for(my $i = 0; $i < int(@a); $i++) { my @fld = split(":", $a[$i]); $ret .= '|' if( $ret ); no warnings 'uninitialized'; # Forum:74690, bug unitialized - $ret .= $fld[0] .'.'. $fld[1]; + $ret .= $fld[0] .'.'. $fld[1]; use warnings; - } + } return $ret; } @@ -1107,15 +1112,15 @@ sub DbLog_ParseEvent { # "day-temp: 22.0 (Celsius)" -> "day-temp", "22.0 (Celsius)" my @parts = split(/: /,$event, 2); $reading = shift @parts; - if(@parts == 2) { + if(@parts == 2) { $value = $parts[0]; $unit = $parts[1]; - } + } else { $value = join(": ", @parts); $unit = ""; - } - + } + # Log3 $name, 2, "DbLog $name -> ParseEvent - Event: $event, Reading: $reading, Value: $value, Unit: $unit"; #default @@ -1124,11 +1129,11 @@ sub DbLog_ParseEvent { if($value eq "") { # Default Splitting geändert 04.01.20 Forum: #106992 if($event =~ /^.*:\s$/) { # und 21.01.20 Forum: #106769 $reading = (split(":", $event))[0]; - } + } else { $reading = "state"; $value = $event; - } + } } #globales Abfangen von # changed in Version 4.12.5 @@ -1137,7 +1142,7 @@ sub DbLog_ParseEvent { #if ($reading =~ m(^temperature)) { $unit = "°C"; } # wenn reading mit temperature beginnt #elsif($reading =~ m(^humidity)) { $unit = "%"; } # wenn reading mit humidity beginnt if($reading =~ m(^humidity)) { $unit = "%"; } # wenn reading mit humidity beginnt - + # the interpretation of the argument depends on the device type # EMEM, M232Counter, M232Voltage return plain numbers @@ -1145,12 +1150,12 @@ sub DbLog_ParseEvent { ($type eq "M232Counter") || ($type eq "EMEM")) { } - #OneWire + #OneWire elsif(($type eq "OWMULTI")) { if(int(@parts) > 1) { $reading = "data"; $value = $event; - } + } else { @parts = split(/\|/, AttrVal($device, 
$reading."VUnit", "")); $unit = $parts[1] if($parts[1]); @@ -1158,9 +1163,9 @@ sub DbLog_ParseEvent { $value =~ s/ \(Celsius\)//; $value =~ s/([-\.\d]+).*/$1/; $unit = "°C"; - } elsif (lc($reading) =~ m/(humidity|vwc)/) { - $value =~ s/ \(\%\)//; - $unit = "%"; + } elsif (lc($reading) =~ m/(humidity|vwc)/) { + $value =~ s/ \(\%\)//; + $unit = "%"; } } } @@ -1169,13 +1174,13 @@ sub DbLog_ParseEvent { if(int(@parts)>1) { $reading = "data"; $value = $event; - } + } else { @parts = split(/\|/, AttrVal($device, $reading."Unit", "")); $unit = $parts[1] if($parts[1]); } } - + # ZWAVE elsif ($type eq "ZWAVE") { if ( $value =~/([-\.\d]+)\s([a-z].*)/i ) { @@ -1191,7 +1196,7 @@ sub DbLog_ParseEvent { $unit = $2; } } - + # MAX elsif(($type eq "MAX")) { $unit = "°C" if(lc($reading) =~ m/temp/); @@ -1217,10 +1222,10 @@ sub DbLog_ParseEvent { $reading = $parts[0]; $value = $parts[1]; $unit = ""; - } elsif($reading =~ m(-temp)) { - $value =~ s/ \(Celsius\)//; $unit= "°C"; - } elsif($reading =~ m(temp-offset)) { - $value =~ s/ \(Celsius\)//; $unit= "°C"; + } elsif($reading =~ m(-temp)) { + $value =~ s/ \(Celsius\)//; $unit= "°C"; + } elsif($reading =~ m(temp-offset)) { + $value =~ s/ \(Celsius\)//; $unit= "°C"; } elsif($reading =~ m(^actuator[0-9]*)) { if($value eq "lime-protection") { $reading = "actuator-lime-protection"; @@ -1248,7 +1253,7 @@ sub DbLog_ParseEvent { } elsif($value eq "pair") { $reading = "actuator-pair"; undef $value; - } + } else { $value =~ s/%//; $value = $value*1.; $unit = "%"; } @@ -1272,14 +1277,14 @@ sub DbLog_ParseEvent { } # HMS elsif($type eq "HMS" || $type eq "CUL_WS" || $type eq "OWTHERM") { - if($event =~ m(T:.*)) { - $reading = "data"; $value= $event; + if($event =~ m(T:.*)) { + $reading = "data"; $value= $event; } elsif($reading eq "temperature") { - $value =~ s/ \(Celsius\)//; + $value =~ s/ \(Celsius\)//; $value =~ s/([-\.\d]+).*/$1/; #OWTHERM - $unit = "°C"; - } elsif($reading eq "humidity") { - $value =~ s/ \(\%\)//; $unit= "%"; + $unit = "°C"; 
+ } elsif($reading eq "humidity") { + $value =~ s/ \(\%\)//; $unit= "%"; } elsif($reading eq "battery") { $value =~ s/ok/1/; $value =~ s/replaced/1/; @@ -1288,7 +1293,7 @@ sub DbLog_ParseEvent { } # CUL_HM elsif ($type eq "CUL_HM") { - $value =~ s/ \%$//; # remove trailing % + $value =~ s/ \%$//; # remove trailing % } # BS @@ -1311,14 +1316,14 @@ sub DbLog_ParseEvent { # RFXTRX Sensors elsif($type eq "TRX_WEATHER") { - if($reading eq "energy_current") { - $value =~ s/ W//; - } elsif($reading eq "energy_total") { - $value =~ s/ kWh//; + if($reading eq "energy_current") { + $value =~ s/ W//; + } elsif($reading eq "energy_total") { + $value =~ s/ kWh//; } elsif($reading eq "battery") { - if ($value =~ m/(\d+)\%/) { - $value = $1; - } + if ($value =~ m/(\d+)\%/) { + $value = $1; + } else { $value = ($value eq "ok"); } @@ -1364,7 +1369,7 @@ sub DbLog_ParseEvent { } @result = ($reading,$value,$unit); - + return @result; } @@ -1395,18 +1400,18 @@ sub DbLog_Log { # Notify-Routine Startzeit my $nst = [gettimeofday]; - - my $events = deviceEvents($dev_hash, AttrVal($name, "addStateEvent", 1)); + + my $events = deviceEvents($dev_hash, AttrVal($name, "addStateEvent", 1)); return if(!$events); - + my $max = int(@{$events}); - + # verbose4 Logs nur für Devices in Attr "verbose4Devs" my $vb4show = 0; my @vb4devs = split(",", AttrVal($name, "verbose4Devs", "")); if (!@vb4devs) { $vb4show = 1; - } + } else { foreach (@vb4devs) { if($dev_name =~ m/$_/i) { @@ -1415,14 +1420,14 @@ sub DbLog_Log { } } } - + if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}) { Log3 $name, 4, "DbLog $name -> ################################################################"; Log3 $name, 4, "DbLog $name -> ### start of new Logcycle ###"; Log3 $name, 4, "DbLog $name -> ################################################################"; Log3 $name, 4, "DbLog $name -> number of events received: $max of device: $dev_name"; } - + my $re = $hash->{REGEXP}; my @row_array; my ($event,$reading,$value,$unit); @@ 
-1432,37 +1437,37 @@ sub DbLog_Log { my $DbLogInclude = AttrVal($dev_name, "DbLogInclude", undef); my $DbLogValueFn = AttrVal($dev_name, "DbLogValueFn",""); my $DbLogSelectionMode = AttrVal($name, "DbLogSelectionMode","Exclude"); - my $value_fn = AttrVal($name, "valueFn", ""); - + my $value_fn = AttrVal($name, "valueFn", ""); + # Funktion aus Device spezifischer DbLogValueFn validieren if( $DbLogValueFn =~ m/^\s*(\{.*\})\s*$/s ) { $DbLogValueFn = $1; - } + } else { $DbLogValueFn = ''; } - + # Funktion aus Attr valueFn validieren if( $value_fn =~ m/^\s*(\{.*\})\s*$/s ) { $value_fn = $1; - } + } else { $value_fn = ''; } - + #one Transaction - eval { + eval { for (my $i = 0; $i < $max; $i++) { my $next = 0; my $event = $events->[$i]; $event = "" if(!defined($event)); $event = DbLog_charfilter($event) if(AttrVal($name, "useCharfilter",0)); - Log3 ($name, 4, "DbLog $name -> check Device: $dev_name , Event: $event") if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); - + Log3 ($name, 4, "DbLog $name -> check Device: $dev_name , Event: $event") if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); + if($dev_name =~ m/^$re$/ || "$dev_name:$event" =~ m/^$re$/ || $DbLogSelectionMode eq 'Include') { my $timestamp = $ts_0; $timestamp = $dev_hash->{CHANGETIME}[$i] if(defined($dev_hash->{CHANGETIME}[$i])); - + my $ctz = AttrVal($name, 'convertTimezone', 'none'); # convert time zone if($ctz ne 'none') { my $err; @@ -1473,17 +1478,17 @@ sub DbLog_Log { tzconv => $ctz, writelog => 0 }; - + ($err, $timestamp) = convertTimeZone ($params); - + if ($err) { Log3 ($name, 1, "DbLog $name - ERROR while converting time zone: $err - exit log loop !"); last; } } - + $event =~ s/\|/_ESC_/gxs; # escape Pipe "|" - + my @r = DbLog_ParseEvent($name,$dev_name, $dev_type, $event); $reading = $r[0]; $value = $r[1]; @@ -1491,9 +1496,9 @@ sub DbLog_Log { if(!defined $reading) {$reading = "";} if(!defined $value) {$value = "";} if(!defined $unit || $unit eq "") {$unit = AttrVal("$dev_name", "unit", "");} - + 
$unit = DbLog_charfilter($unit) if(AttrVal($name, "useCharfilter",0)); - + # Devices / Readings ausschließen durch Attribut "excludeDevs" # attr excludeDevs [#],[#],[#] my ($exc,@excldr,$ds,$rd,@exdvs); @@ -1501,7 +1506,7 @@ sub DbLog_Log { if($exc) { $exc =~ s/[\s\n]/,/g; @excldr = split(",",$exc); - + for my $excl (@excldr) { ($ds,$rd) = split("#",$excl); @exdvs = devspec2array($ds); @@ -1512,68 +1517,68 @@ sub DbLog_Log { Log3 $name, 4, "DbLog $name -> Device:Reading \"$dev_name:$reading\" global excluded from logging by attribute \"excludeDevs\" " if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); $next = 1; } - } + } else { if($dev_name =~ m/^$ed$/) { Log3 $name, 4, "DbLog $name -> Device \"$dev_name\" global excluded from logging by attribute \"excludeDevs\" " if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); $next = 1; - } + } } } } } - next if($next); + next if($next); } - - Log3 $name, 5, "DbLog $name -> parsed Event: $dev_name , Event: $event" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); + + Log3 $name, 5, "DbLog $name -> parsed Event: $dev_name , Event: $event" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); Log3 $name, 5, "DbLog $name -> DbLogExclude of \"$dev_name\": $DbLogExclude" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"} && $DbLogExclude); Log3 $name, 5, "DbLog $name -> DbLogInclude of \"$dev_name\": $DbLogInclude" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"} && $DbLogInclude); - + # Je nach DBLogSelectionMode muss das vorgegebene Ergebnis der Include-, bzw. Exclude-Pruefung # entsprechend unterschiedlich vorbelegt sein. 
# keine Readings loggen die in DbLogExclude explizit ausgeschlossen sind my $DoIt = 0; - + $DoIt = 1 if($DbLogSelectionMode =~ m/Exclude/ ); - + if($DbLogExclude && $DbLogSelectionMode =~ m/Exclude/) { # Bsp: "(temperature|humidity):300,battery:3600:force" my @v1 = split(/,/, $DbLogExclude); - + for (my $i=0; $i error device \"$dev_name\" specific DbLogValueFn: ".$@ if($@); - + if($IGNORE) { # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt $defs{$dev_name}{Helper}{DBLOG}{$reading}{$name}{TIME} = $lastt if($lastt); # patch Forum:#111423 - $defs{$dev_name}{Helper}{DBLOG}{$reading}{$name}{VALUE} = $lastv if(defined $lastv); - + $defs{$dev_name}{Helper}{DBLOG}{$reading}{$name}{VALUE} = $lastv if(defined $lastv); + if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}) { Log3 $name, 4, "DbLog $name -> Event ignored by device \"$dev_name\" specific DbLogValueFn - TS: $timestamp, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $reading, Value: $value, Unit: $unit"; } - - next; + + next; } - + my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($TIMESTAMP =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); eval { my $epoch_seconds_begin = timelocal($sec, $min, $hh, $dd, $mm-1, $yyyy-1900); }; - + if (!$@) { $timestamp = $TIMESTAMP; - } + } else { Log3 ($name, 2, "DbLog $name -> TIMESTAMP got from DbLogValueFn in $dev_name is invalid: $TIMESTAMP"); } - + $reading = $READING if($READING ne ''); $value = $VALUE if(defined $VALUE); $unit = $UNIT if(defined $UNIT); } - + # zentrale valueFn im DbLog-Device abarbeiten if($value_fn ne '') { my $TIMESTAMP = $timestamp; @@ -1650,28 +1655,28 @@ sub DbLog_Log { eval $value_fn; Log3 $name, 2, "DbLog $name -> error valueFn: ".$@ if($@); - + if($IGNORE) { # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt $defs{$dev_name}{Helper}{DBLOG}{$reading}{$name}{TIME} = $lastt if($lastt); # patch Forum:#111423 - $defs{$dev_name}{Helper}{DBLOG}{$reading}{$name}{VALUE} = $lastv if(defined $lastv); - + 
$defs{$dev_name}{Helper}{DBLOG}{$reading}{$name}{VALUE} = $lastv if(defined $lastv); + if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}) { Log3 $name, 4, "DbLog $name -> Event ignored by valueFn - TS: $timestamp, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $reading, Value: $value, Unit: $unit"; } - - next; + + next; } - + my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($TIMESTAMP =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); eval { my $epoch_seconds_begin = timelocal($sec, $min, $hh, $dd, $mm-1, $yyyy-1900); }; - + if (!$@) { $timestamp = $TIMESTAMP; - } + } else { Log3 ($name, 2, "DbLog $name -> Parameter TIMESTAMP got from valueFn is invalid: $TIMESTAMP"); } - + $dev_name = $DEVICE if($DEVICE ne ''); $dev_type = $DEVICETYPE if($DEVICETYPE ne ''); $reading = $READING if($READING ne ''); @@ -1681,16 +1686,16 @@ sub DbLog_Log { # Daten auf maximale Länge beschneiden ($dev_name,$dev_type,$event,$reading,$value,$unit) = DbLog_cutCol($hash,$dev_name,$dev_type,$event,$reading,$value,$unit); - + my $row = ($timestamp."|".$dev_name."|".$dev_type."|".$event."|".$reading."|".$value."|".$unit); Log3 $name, 4, "DbLog $name -> added event - Timestamp: $timestamp, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $reading, Value: $value, Unit: $unit" - if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); - - if($async) { # asynchoner non-blocking Mode + if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); + + if($async) { # asynchoner non-blocking Mode $data{DbLog}{$name}{cache}{index}++; # Cache & CacheIndex für Events zum asynchronen Schreiben in DB my $index = $data{DbLog}{$name}{cache}{index}; $data{DbLog}{$name}{cache}{memcache}{$index} = $row; - + my $memcount = $data{DbLog}{$name}{cache}{memcache} ? scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}) : 0; my $mce = $ce == 1 ? 
1 : 0; @@ -1706,40 +1711,40 @@ sub DbLog_Log { } } $net = tv_interval($nst); # Notify-Routine Laufzeit ermitteln - } + } else { # synchoner Mode - push(@row_array, $row); - } - } + push(@row_array, $row); + } + } } } - }; - if(!$async) { + }; + if(!$async) { if(@row_array) { # synchoner Mode - return if($hash->{HELPER}{REOPEN_RUNS}); # return wenn "reopen" mit Ablaufzeit gestartet ist - + return if($hash->{HELPER}{REOPEN_RUNS}); # return wenn "reopen" mit Ablaufzeit gestartet ist + my $error = DbLog_Push($hash, $vb4show, @row_array); Log3 ($name, 5, "DbLog $name -> DbLog_Push Returncode: $error") if($error && $vb4show); - + CancelDelayedShutdown($name) if($hash->{HELPER}{SHUTDOWNSEQ}); Log3 ($name, 2, "DbLog $name - Last database write cycle done") if(delete $hash->{HELPER}{SHUTDOWNSEQ}); - + my $state = $error ? $error : (IsDisabled($name)) ? "disabled" : "connected"; DbLog_setReadingstate ($hash, $state); - + # Notify-Routine Laufzeit ermitteln $net = tv_interval($nst); - } + } else { CancelDelayedShutdown($name) if($hash->{HELPER}{SHUTDOWNSEQ}); Log3 ($name, 2, "DbLog $name - no data for last database write cycle") if(delete $hash->{HELPER}{SHUTDOWNSEQ}); } } - + if($net && AttrVal($name, "showNotifyTime", undef)) { readingsSingleUpdate($hash, "notify_processing_time", sprintf("%.4f",$net), 1); } - + return; } @@ -1753,63 +1758,63 @@ sub DbLog_checkDefMinInt { my ($name,$dev_name,$now,$reading,$value) = @_; my $force; my $DoIt = 1; - + my $defminint = AttrVal($name, "defaultMinInterval", undef); return $DoIt if(!$defminint); # Attribut "defaultMinInterval" nicht im DbLog gesetzt -> kein ToDo - + my $DbLogExclude = AttrVal($dev_name, "DbLogExclude", undef); my $DbLogInclude = AttrVal($dev_name, "DbLogInclude", undef); $defminint =~ s/[\s\n]/,/g; my @adef = split(/,/, $defminint); - + my $inex = ($DbLogExclude?$DbLogExclude.",":"").($DbLogInclude?$DbLogInclude:""); - + if($inex) { # Quelldevice hat DbLogExclude und/oder DbLogInclude gesetzt my @ie = split(/,/, 
$inex); for (my $k=0; $k kein Überschreiben durch $defminint } } } - + for (my $l=0; $l don't log it !"); $DoIt = 0; return $DoIt; - } + } } } } } # Log3 ($name, 1, "DbLog $name - defaulMInInterval - compare of \"$dev_name\", reading \"$reading\" successful -> log it !"); - + return $DoIt; } ################################################################################################# -# Schreibroutine Einfügen Werte in DB im Synchronmode +# Schreibroutine Einfügen Werte in DB im Synchronmode ################################################################################################# sub DbLog_Push { my ($hash, $vb4show, @row_array) = @_; @@ -1823,109 +1828,109 @@ sub DbLog_Push { my $current = $hash->{HELPER}{TC}; my $errorh = ""; my $error = ""; - my $doins = 0; # Hilfsvariable, wenn "1" sollen inserts in Tabelle current erfolgen (updates schlugen fehl) + my $doins = 0; # Hilfsvariable, wenn "1" sollen inserts in Tabelle current erfolgen (updates schlugen fehl) my $dbh; - + my $nh = ($hash->{MODEL} ne 'SQLITE') ? 1 : 0; - # Unterscheidung $dbh um Abbrüche in Plots (SQLite) zu vermeiden und + # Unterscheidung $dbh um Abbrüche in Plots (SQLite) zu vermeiden und # andererseite kein "MySQL-Server has gone away" Fehler if ($nh) { $dbh = DbLog_ConnectNewDBH($hash); return if(!$dbh); - } + } else { $dbh = $hash->{DBHP}; eval { if ( !$dbh || not $dbh->ping ) { # DB Session dead, try to reopen now ! DbLog_ConnectPush($hash,1); - } + } }; if ($@) { Log3($name, 1, "DbLog $name: DBLog_Push - DB Session dead! 
- $@"); return $@; - } + } else { $dbh = $hash->{DBHP}; } - } - - $dbh->{RaiseError} = 1; + } + + $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; - - if($tl) { # Tracelevel setzen - $dbh->{TraceLevel} = "$tl|$tf"; - } - + + if($tl) { # Tracelevel setzen + $dbh->{TraceLevel} = "$tl|$tf"; + } + my ($useac,$useta) = DbLog_commitMode($hash); my $ac = ($dbh->{AutoCommit})?"ON":"OFF"; my $tm = ($useta)?"ON":"OFF"; - + Log3 $name, 4, "DbLog $name -> ################################################################"; Log3 $name, 4, "DbLog $name -> ### New database processing cycle - synchronous ###"; Log3 $name, 4, "DbLog $name -> ################################################################"; Log3 $name, 4, "DbLog $name -> DbLogType is: $DbLogType"; Log3 $name, 4, "DbLog $name -> AutoCommit mode: $ac, Transaction mode: $tm"; Log3 $name, 4, "DbLog $name -> Insert mode: ".($bi?"Bulk":"Array"); - - # check ob PK verwendet wird, @usepkx?Anzahl der Felder im PK:0 wenn kein PK, $pkx?Namen der Felder:none wenn kein PK + + # check ob PK verwendet wird, @usepkx?Anzahl der Felder im PK:0 wenn kein PK, $pkx?Namen der Felder:none wenn kein PK my ($usepkh,$usepkc,$pkh,$pkc); if (!$supk) { ($usepkh,$usepkc,$pkh,$pkc) = DbLog_checkUsePK($hash,$dbh); - } + } else { Log3 $hash->{NAME}, 5, "DbLog $name -> Primary Key usage suppressed by attribute noSupportPK"; } - + my (@timestamp,@device,@type,@event,@reading,@value,@unit); my (@timestamp_cur,@device_cur,@type_cur,@event_cur,@reading_cur,@value_cur,@unit_cur); my ($st,$sth_ih,$sth_ic,$sth_uc,$sqlins); my ($tuples, $rows); - + no warnings 'uninitialized'; - + my $ceti = $#row_array+1; - + foreach my $row (@row_array) { my @a = split("\\|",$row); s/_ESC_/\|/gxs for @a; # escaped Pipe return to "|" - push(@timestamp, "$a[0]"); - push(@device, "$a[1]"); - push(@type, "$a[2]"); - push(@event, "$a[3]"); - push(@reading, "$a[4]"); - push(@value, "$a[5]"); + push(@timestamp, "$a[0]"); + push(@device, "$a[1]"); + push(@type, "$a[2]"); + 
push(@event, "$a[3]"); + push(@reading, "$a[4]"); + push(@value, "$a[5]"); push(@unit, "$a[6]"); Log3 $hash->{NAME}, 4, "DbLog $name -> processing event Timestamp: $a[0], Device: $a[1], Type: $a[2], Event: $a[3], Reading: $a[4], Value: $a[5], Unit: $a[6]" - if($vb4show); - } + if($vb4show); + } use warnings; - + if($bi) { ####################### # Bulk-Insert ####################### $st = [gettimeofday]; # SQL-Startzeit - - if (lc($DbLogType) =~ m(history)) { - ######################################## + + if (lc($DbLogType) =~ m(history)) { + ######################################## # insert history mit/ohne primary key if ($usepkh && $hash->{MODEL} eq 'MYSQL') { $sqlins = "INSERT IGNORE INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES "; - } + } elsif ($usepkh && $hash->{MODEL} eq 'SQLITE') { $sqlins = "INSERT OR IGNORE INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES "; - } + } elsif ($usepkh && $hash->{MODEL} eq 'POSTGRESQL') { $sqlins = "INSERT INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES "; - } + } else { # ohne PK $sqlins = "INSERT INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES "; - } - - no warnings 'uninitialized'; + } + + no warnings 'uninitialized'; foreach my $row (@row_array) { - my @a = split("\\|",$row); + my @a = split("\\|",$row); s/_ESC_/\|/gxs for @a; # escaped Pipe return to "|" Log3 $hash->{NAME}, 5, "DbLog $name -> processing event Timestamp: $a[0], Device: $a[1], Type: $a[2], Event: $a[3], Reading: $a[4], Value: $a[5], Unit: $a[6]"; $a[3] =~ s/'/''/g; # escape ' with '' @@ -1935,79 +1940,79 @@ sub DbLog_Push { $a[5] =~ s/\\/\\\\/g; # escape \ with \\ $a[6] =~ s/\\/\\\\/g; # escape \ with \\ $sqlins .= "('$a[0]','$a[1]','$a[2]','$a[3]','$a[4]','$a[5]','$a[6]'),"; - } + } use warnings; - + chop($sqlins); - + if ($usepkh && $hash->{MODEL} eq 'POSTGRESQL') { $sqlins .= " ON CONFLICT DO NOTHING"; } - + eval { $dbh->begin_work() 
if($useta && $dbh->{AutoCommit}); }; # Transaktion wenn gewünscht und autocommit ein if ($@) { Log3($name, 2, "DbLog $name -> Error start transaction for $history - $@"); } eval { $sth_ih = $dbh->prepare($sqlins); if($tl) { - # Tracelevel setzen + # Tracelevel setzen $sth_ih->{TraceLevel} = "$tl|$tf"; - } + } my $ins_hist = $sth_ih->execute(); $ins_hist = 0 if($ins_hist eq "0E0"); - + if($ins_hist == $ceti) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ins_hist of $ceti events inserted into table $history".($usepkh?" using PK on columns $pkh":""); - } + } else { if($usepkh) { - Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".$ins_hist." of $ceti events inserted into table $history due to PK on columns $pkh"; - } - else { - Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".$ins_hist." of $ceti events inserted into table $history"; + Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".$ins_hist." of $ceti events inserted into table $history due to PK on columns $pkh"; } - } + else { + Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".$ins_hist." 
of $ceti events inserted into table $history"; + } + } eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # Data commit if ($@) { Log3($name, 2, "DbLog $name -> Error commit $history - $@"); - } + } else { if(!$dbh->{AutoCommit}) { Log3($name, 4, "DbLog $name -> insert table $history committed"); - } + } else { Log3($name, 4, "DbLog $name -> insert table $history committed by autocommit"); } - } + } }; - + if ($@) { $errorh = $@; Log3 $hash->{NAME}, 2, "DbLog $name -> Error table $history - $errorh"; eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; # issue Turning on AutoCommit failed if ($@) { Log3($name, 2, "DbLog $name -> Error rollback $history - $@"); - } + } else { Log3($name, 4, "DbLog $name -> insert $history rolled back"); } - } - } + } + } if (lc($DbLogType) =~ m(current)) { ################################################################# - # insert current mit/ohne primary key - # Array-Insert wird auch bei Bulk verwendet weil im Bulk-Mode - # die nicht upgedateten Sätze nicht identifiziert werden können + # insert current mit/ohne primary key + # Array-Insert wird auch bei Bulk verwendet weil im Bulk-Mode + # die nicht upgedateten Sätze nicht identifiziert werden können if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; + } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ic = $dbh->prepare("INSERT OR IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { eval { $sth_ic = $dbh->prepare("INSERT INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?) 
ON CONFLICT DO NOTHING"); }; - } + } else { # ohne PK eval { $sth_ic = $dbh->prepare("INSERT INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; @@ -2015,26 +2020,26 @@ sub DbLog_Push { if ($@) { return $@; } - + if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - $sth_uc = $dbh->prepare("REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); - } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { + $sth_uc = $dbh->prepare("REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); + } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { $sth_uc = $dbh->prepare("INSERT OR REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); - } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { - $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) - DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, + } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { + $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) + DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, VALUE=EXCLUDED.VALUE, UNIT=EXCLUDED.UNIT"); - } - else { + } + else { $sth_uc = $dbh->prepare("UPDATE $current SET TIMESTAMP=?, TYPE=?, EVENT=?, VALUE=?, UNIT=? WHERE (DEVICE=?) 
AND (READING=?)"); } - + if($tl) { - # Tracelevel setzen + # Tracelevel setzen $sth_uc->{TraceLevel} = "$tl|$tf"; $sth_ic->{TraceLevel} = "$tl|$tf"; } - + $sth_uc->bind_param_array(1, [@timestamp]); $sth_uc->bind_param_array(2, [@type]); $sth_uc->bind_param_array(3, [@event]); @@ -2042,7 +2047,7 @@ sub DbLog_Push { $sth_uc->bind_param_array(5, [@unit]); $sth_uc->bind_param_array(6, [@device]); $sth_uc->bind_param_array(7, [@reading]); - + eval { $dbh->begin_work() if($useta && $dbh->{AutoCommit}); }; # Transaktion wenn gewünscht und autocommit ein if ($@) { Log3($name, 2, "DbLog $name -> Error start transaction for $current - $@"); @@ -2055,23 +2060,23 @@ sub DbLog_Push { $status = 0 if($status eq "0E0"); next if($status); # $status ist "1" wenn update ok Log3 $hash->{NAME}, 4, "DbLog $name -> Failed to update in $current, try to insert - TS: $timestamp[$tuple], Device: $device[$tuple], Reading: $reading[$tuple], Status = $status"; - push(@timestamp_cur, "$timestamp[$tuple]"); - push(@device_cur, "$device[$tuple]"); - push(@type_cur, "$type[$tuple]"); - push(@event_cur, "$event[$tuple]"); - push(@reading_cur, "$reading[$tuple]"); - push(@value_cur, "$value[$tuple]"); + push(@timestamp_cur, "$timestamp[$tuple]"); + push(@device_cur, "$device[$tuple]"); + push(@type_cur, "$type[$tuple]"); + push(@event_cur, "$event[$tuple]"); + push(@reading_cur, "$reading[$tuple]"); + push(@value_cur, "$value[$tuple]"); push(@unit_cur, "$unit[$tuple]"); $nupd_cur++; } if(!$nupd_cur) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ceti of $ceti events updated in table $current".($usepkc?" using PK on columns $pkc":""); - } + } else { Log3 $hash->{NAME}, 4, "DbLog $name -> $nupd_cur of $ceti events not updated and try to insert into table $current".($usepkc?" 
using PK on columns $pkc":""); $doins = 1; } - + if ($doins) { # events die nicht in Tabelle current updated wurden, werden in current neu eingefügt $sth_ic->bind_param_array(1, [@timestamp_cur]); @@ -2081,7 +2086,7 @@ sub DbLog_Push { $sth_ic->bind_param_array(5, [@reading_cur]); $sth_ic->bind_param_array(6, [@value_cur]); $sth_ic->bind_param_array(7, [@unit_cur]); - + ($tuples, $rows) = $sth_ic->execute_array( { ArrayTupleStatus => \my @tuple_status } ); my $nins_cur = 0; for my $tuple (0..$#device_cur) { @@ -2093,7 +2098,7 @@ sub DbLog_Push { } if(!$nins_cur) { Log3 $hash->{NAME}, 4, "DbLog $name -> ".($#device_cur+1)." of ".($#device_cur+1)." events inserted into table $current ".($usepkc?" using PK on columns $pkc":""); - } + } else { Log3 $hash->{NAME}, 4, "DbLog $name -> ".($#device_cur+1-$nins_cur)." of ".($#device_cur+1)." events inserted into table $current".($usepkc?" using PK on columns $pkc":""); } @@ -2101,37 +2106,37 @@ sub DbLog_Push { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # issue Turning on AutoCommit failed if ($@) { Log3($name, 2, "DbLog $name -> Error commit table $current - $@"); - } + } else { if(!$dbh->{AutoCommit}) { Log3($name, 4, "DbLog $name -> insert / update table $current committed"); - } + } else { Log3($name, 4, "DbLog $name -> insert / update table $current committed by autocommit"); } } - }; - } - } + }; + } + } else { ####################### # Array-Insert - ####################### - - $st = [gettimeofday]; # SQL-Startzeit - + ####################### + + $st = [gettimeofday]; # SQL-Startzeit + if (lc($DbLogType) =~ m(history)) { - ######################################## + ######################################## # insert history mit/ohne primary key if ($usepkh && $hash->{MODEL} eq 'MYSQL') { eval { $sth_ih = $dbh->prepare("INSERT IGNORE INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + } elsif ($usepkh && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ih = 
$dbh->prepare("INSERT OR IGNORE INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + } elsif ($usepkh && $hash->{MODEL} eq 'POSTGRESQL') { eval { $sth_ih = $dbh->prepare("INSERT INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?) ON CONFLICT DO NOTHING"); }; - } + } else { # ohne PK eval { $sth_ih = $dbh->prepare("INSERT INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; @@ -2141,17 +2146,17 @@ sub DbLog_Push { } if($tl) { - # Tracelevel setzen + # Tracelevel setzen $sth_ih->{TraceLevel} = "$tl|$tf"; - } - + } + $sth_ih->bind_param_array(1, [@timestamp]); $sth_ih->bind_param_array(2, [@device]); $sth_ih->bind_param_array(3, [@type]); $sth_ih->bind_param_array(4, [@event]); $sth_ih->bind_param_array(5, [@reading]); $sth_ih->bind_param_array(6, [@value]); - $sth_ih->bind_param_array(7, [@unit]); + $sth_ih->bind_param_array(7, [@unit]); eval { $dbh->begin_work() if($useta && $dbh->{AutoCommit}); }; # Transaktion wenn gewünscht und autocommit ein if ($@) { @@ -2163,61 +2168,61 @@ sub DbLog_Push { for my $tuple (0..$#row_array) { my $status = $tuple_status[$tuple]; $status = 0 if($status eq "0E0"); - next if($status); # $status ist "1" wenn insert ok + next if($status); # $status ist "1" wenn insert ok Log3 $hash->{NAME}, 3, "DbLog $name -> Insert into $history rejected".($usepkh?" (possible PK violation) ":" ")."- TS: $timestamp[$tuple], Device: $device[$tuple], Event: $event[$tuple]"; my $nlh = ($timestamp[$tuple]."|".$device[$tuple]."|".$type[$tuple]."|".$event[$tuple]."|".$reading[$tuple]."|".$value[$tuple]."|".$unit[$tuple]); $nins_hist++; } if(!$nins_hist) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ceti of $ceti events inserted into table $history".($usepkh?" using PK on columns $pkh":""); - } + } else { if($usepkh) { - Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".($ceti-$nins_hist)." 
of $ceti events inserted into table $history due to PK on columns $pkh"; - } + Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".($ceti-$nins_hist)." of $ceti events inserted into table $history due to PK on columns $pkh"; + } else { - Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".($ceti-$nins_hist)." of $ceti events inserted into table $history"; - } + Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".($ceti-$nins_hist)." of $ceti events inserted into table $history"; + } } eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # Data commit if ($@) { Log3($name, 2, "DbLog $name -> Error commit $history - $@"); - } + } else { if(!$dbh->{AutoCommit}) { Log3($name, 4, "DbLog $name -> insert table $history committed"); - } + } else { Log3($name, 4, "DbLog $name -> insert table $history committed by autocommit"); } } }; - + if ($@) { $errorh = $@; Log3 $hash->{NAME}, 2, "DbLog $name -> Error table $history - $errorh"; eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; # issue Turning on AutoCommit failed if ($@) { Log3($name, 2, "DbLog $name -> Error rollback $history - $@"); - } + } else { Log3($name, 4, "DbLog $name -> insert $history rolled back"); } - } - } - + } + } + if (lc($DbLogType) =~ m(current)) { ######################################## - # insert current mit/ohne primary key + # insert current mit/ohne primary key if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; + } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ic = $dbh->prepare("INSERT OR IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { eval { $sth_ic = $dbh->prepare("INSERT INTO 
$current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?) ON CONFLICT DO NOTHING"); }; - } + } else { # ohne PK eval { $sth_ic = $dbh->prepare("INSERT INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; @@ -2225,28 +2230,28 @@ sub DbLog_Push { if ($@) { return $@; } - + if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - $sth_uc = $dbh->prepare("REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); - } - elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { + $sth_uc = $dbh->prepare("REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); + } + elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { $sth_uc = $dbh->prepare("INSERT OR REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); - } - elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { - $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) - DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, + } + elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { + $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) + DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, VALUE=EXCLUDED.VALUE, UNIT=EXCLUDED.UNIT"); - } - else { + } + else { $sth_uc = $dbh->prepare("UPDATE $current SET TIMESTAMP=?, TYPE=?, EVENT=?, VALUE=?, UNIT=? WHERE (DEVICE=?) 
AND (READING=?)"); } - + if($tl) { - # Tracelevel setzen + # Tracelevel setzen $sth_uc->{TraceLevel} = "$tl|$tf"; $sth_ic->{TraceLevel} = "$tl|$tf"; } - + $sth_uc->bind_param_array(1, [@timestamp]); $sth_uc->bind_param_array(2, [@type]); $sth_uc->bind_param_array(3, [@event]); @@ -2254,7 +2259,7 @@ sub DbLog_Push { $sth_uc->bind_param_array(5, [@unit]); $sth_uc->bind_param_array(6, [@device]); $sth_uc->bind_param_array(7, [@reading]); - + eval { $dbh->begin_work() if($useta && $dbh->{AutoCommit}); }; # Transaktion wenn gewünscht und autocommit ein if ($@) { Log3($name, 2, "DbLog $name -> Error start transaction for $current - $@"); @@ -2267,23 +2272,23 @@ sub DbLog_Push { $status = 0 if($status eq "0E0"); next if($status); # $status ist "1" wenn update ok Log3 $hash->{NAME}, 4, "DbLog $name -> Failed to update in $current, try to insert - TS: $timestamp[$tuple], Device: $device[$tuple], Reading: $reading[$tuple], Status = $status"; - push(@timestamp_cur, "$timestamp[$tuple]"); - push(@device_cur, "$device[$tuple]"); - push(@type_cur, "$type[$tuple]"); - push(@event_cur, "$event[$tuple]"); - push(@reading_cur, "$reading[$tuple]"); - push(@value_cur, "$value[$tuple]"); + push(@timestamp_cur, "$timestamp[$tuple]"); + push(@device_cur, "$device[$tuple]"); + push(@type_cur, "$type[$tuple]"); + push(@event_cur, "$event[$tuple]"); + push(@reading_cur, "$reading[$tuple]"); + push(@value_cur, "$value[$tuple]"); push(@unit_cur, "$unit[$tuple]"); $nupd_cur++; } if(!$nupd_cur) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ceti of $ceti events updated in table $current".($usepkc?" using PK on columns $pkc":""); - } + } else { Log3 $hash->{NAME}, 4, "DbLog $name -> $nupd_cur of $ceti events not updated and try to insert into table $current".($usepkc?" 
using PK on columns $pkc":""); $doins = 1; } - + if ($doins) { # events die nicht in Tabelle current updated wurden, werden in current neu eingefügt $sth_ic->bind_param_array(1, [@timestamp_cur]); @@ -2293,7 +2298,7 @@ sub DbLog_Push { $sth_ic->bind_param_array(5, [@reading_cur]); $sth_ic->bind_param_array(6, [@value_cur]); $sth_ic->bind_param_array(7, [@unit_cur]); - + ($tuples, $rows) = $sth_ic->execute_array( { ArrayTupleStatus => \my @tuple_status } ); my $nins_cur = 0; for my $tuple (0..$#device_cur) { @@ -2305,7 +2310,7 @@ sub DbLog_Push { } if(!$nins_cur) { Log3 $hash->{NAME}, 4, "DbLog $name -> ".($#device_cur+1)." of ".($#device_cur+1)." events inserted into table $current ".($usepkc?" using PK on columns $pkc":""); - } + } else { Log3 $hash->{NAME}, 4, "DbLog $name -> ".($#device_cur+1-$nins_cur)." of ".($#device_cur+1)." events inserted into table $current".($usepkc?" using PK on columns $pkc":""); } @@ -2313,24 +2318,24 @@ sub DbLog_Push { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # issue Turning on AutoCommit failed if ($@) { Log3($name, 2, "DbLog $name -> Error commit table $current - $@"); - } + } else { if(!$dbh->{AutoCommit}) { Log3($name, 4, "DbLog $name -> insert / update table $current committed"); - } + } else { Log3($name, 4, "DbLog $name -> insert / update table $current committed by autocommit"); } } - }; + }; } } - + # SQL-Laufzeit ermitteln my $rt = tv_interval($st); - + if(AttrVal($name, "showproctime", 0)) { - readingsBeginUpdate($hash); + readingsBeginUpdate($hash); readingsBulkUpdate($hash, "sql_processing_time", sprintf("%.4f",$rt)); readingsEndUpdate($hash, 0); } @@ -2340,11 +2345,11 @@ sub DbLog_Push { } if(!$tl) { # Trace ausschalten - $dbh->{TraceLevel} = "0"; - $sth_ih->{TraceLevel} = "0"; + $dbh->{TraceLevel} = "0"; + $sth_ih->{TraceLevel} = "0"; } - - $dbh->{RaiseError} = 0; + + $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; $dbh->disconnect if ($nh); @@ -2356,40 +2361,40 @@ return Encode::encode_utf8($error); 
################################################################################################# sub DbLog_execmemcache { my $hash = shift; - my $name = $hash->{NAME}; + my $name = $hash->{NAME}; my $syncival = AttrVal($name, "syncInterval", 30 ); my $clim = AttrVal($name, "cacheLimit", $dblog_cachedef ); my $async = AttrVal($name, "asyncMode", 0 ); my $ce = AttrVal($name, "cacheEvents", 0 ); my $timeout = AttrVal($name, "timeout", 86400 ); my $DbLogType = AttrVal($name, "DbLogType", "History" ); - + my $dbconn = $hash->{dbconn}; my $dbuser = $hash->{dbuser}; my $dbpassword = $attr{"sec$name"}{secret}; my $dolog = 1; - + my (@row_array,$dbh,$error); - + RemoveInternalTimer($hash, "DbLog_execmemcache"); - + if($init_done != 1) { InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); return; } - + my $memcount = $data{DbLog}{$name}{cache}{memcache} ? scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}) : 0; my $params = { hash => $hash, clim => $clim, memcount => $memcount }; - + if(!$async || IsDisabled($name) || $hash->{HELPER}{REOPEN_RUNS}) { # return wenn "reopen" mit Zeitangabe läuft, oder kein asynchroner Mode oder wenn disabled DbLog_writeFileIfCacheOverflow ($params); # Cache exportieren bei Overflow return; } - + if($hash->{HELPER}{".RUNNING_PID"} && $hash->{HELPER}{".RUNNING_PID"}{pid} =~ m/DEAD/) { # tote PID's löschen delete $hash->{HELPER}{".RUNNING_PID"}; } @@ -2399,7 +2404,7 @@ sub DbLog_execmemcache { if($hash->{HELPER}{DELDAYS_PID} && $hash->{HELPER}{DELDAYS_PID}{pid} =~ m/DEAD/) { delete $hash->{HELPER}{DELDAYS_PID}; } - + if($hash->{MODEL} eq "SQLITE") { # bei SQLite Sperrverwaltung Logging wenn andere schreibende Zugriffe laufen if($hash->{HELPER}{DELDAYS_PID}) { $error = "deleteOldDaysNbl is running - resync at NextSync"; @@ -2414,12 +2419,12 @@ sub DbLog_execmemcache { $dolog = 0; } } - + my $mce = $ce == 2 ? 
1 : 0; readingsSingleUpdate($hash, "CacheUsage", $memcount, $mce); - - if($memcount && $dolog && !$hash->{HELPER}{".RUNNING_PID"}) { + + if($memcount && $dolog && !$hash->{HELPER}{".RUNNING_PID"}) { Log3 ($name, 4, "DbLog $name -> ################################################################"); Log3 ($name, 4, "DbLog $name -> ### New database processing cycle - asynchronous ###"); Log3 ($name, 4, "DbLog $name -> ################################################################"); @@ -2428,48 +2433,48 @@ sub DbLog_execmemcache { my $wrotefile = DbLog_writeFileIfCacheOverflow ($params); # Cache exportieren bei Overflow return if($wrotefile); - + for my $key (sort(keys %{$data{DbLog}{$name}{cache}{memcache}})) { Log3 ($hash->{NAME}, 5, "DbLog $name -> MemCache contains: ".$data{DbLog}{$name}{cache}{memcache}{$key}); - push (@row_array, delete($data{DbLog}{$name}{cache}{memcache}{$key})); - } - + push (@row_array, delete($data{DbLog}{$name}{cache}{memcache}{$key})); + } + undef $data{DbLog}{$name}{cache}{memcache}; # sicherheitshalber Memory freigeben: https://perlmaven.com/undef-on-perl-arrays-and-hashes , bzw. 
https://www.effectiveperlprogramming.com/2018/09/undef-a-scalar-to-release-its-memory/ - + my $rowlist = join('§', @row_array); $rowlist = encode_base64($rowlist,""); - + $hash->{HELPER}{".RUNNING_PID"} = BlockingCall ( - "DbLog_PushAsync", - "$name|$rowlist", - "DbLog_PushAsyncDone", - $timeout, - "DbLog_PushAsyncAborted", - $hash + "DbLog_PushAsync", + "$name|$rowlist", + "DbLog_PushAsyncDone", + $timeout, + "DbLog_PushAsyncAborted", + $hash ); - + $hash->{HELPER}{".RUNNING_PID"}{loglevel} = 4; Log3 ($hash->{NAME}, 5, "DbLog $name -> DbLog_PushAsync called with timeout: $timeout"); - } + } else { if($dolog && $hash->{HELPER}{".RUNNING_PID"}) { $error = "Commit already running - resync at NextSync"; DbLog_writeFileIfCacheOverflow ($params); # Cache exportieren bei Overflow - } + } else { CancelDelayedShutdown($name) if($hash->{HELPER}{SHUTDOWNSEQ}); Log3 ($name, 2, "DbLog $name - no data for last database write cycle") if(delete $hash->{HELPER}{SHUTDOWNSEQ}); } } - + my $nextsync = gettimeofday()+$syncival; my $nsdt = FmtDateTime($nextsync); my $se = AttrVal($name, "syncEvents", undef) ? 1 : 0; - + readingsSingleUpdate($hash, "NextSync", $nsdt. " or if CacheUsage ".$clim." reached", $se); - + DbLog_setReadingstate ($hash, $error); - + InternalTimer($nextsync, "DbLog_execmemcache", $hash, 0); return; @@ -2486,24 +2491,24 @@ sub DbLog_writeFileIfCacheOverflow { my $hash = $paref->{hash}; my $clim = $paref->{clim}; my $memcount = $paref->{memcount}; - + my $name = $hash->{NAME}; my $success = 0; my $coft = AttrVal($name, "cacheOverflowThreshold", 0); # Steuerung exportCache statt schreiben in DB $coft = ($coft && $coft < $clim) ? $clim : $coft; # cacheOverflowThreshold auf cacheLimit setzen wenn kleiner als cacheLimit - + my $overflowstate = "normal"; - my $overflownum; - + my $overflownum; + if($coft) { $overflownum = $memcount >= $coft ? $memcount-$coft : 0; } else { $overflownum = $memcount >= $clim ? 
$memcount-$clim : 0; } - + $overflowstate = "exceeded" if($overflownum); - + readingsBeginUpdate($hash); readingsBulkUpdate ($hash, "CacheOverflowLastNum", $overflownum ); readingsBulkUpdateIfChanged ($hash, "CacheOverflowLastState", $overflowstate, 1); @@ -2512,18 +2517,18 @@ sub DbLog_writeFileIfCacheOverflow { if($coft && $memcount >= $coft) { Log3 ($name, 2, "DbLog $name -> WARNING - Cache is exported to file instead of logging it to database"); my $error = CommandSet (undef, qq{$name exportCache purgecache}); - + if($error) { # Fehler beim Export Cachefile Log3 ($name, 1, "DbLog $name -> ERROR - while exporting Cache file: $error"); - DbLog_setReadingstate ($hash, $error); + DbLog_setReadingstate ($hash, $error); return $success; } - + DbLog_setReadingstate ($hash, qq{Cache exported to "lastCachefile" due to Cache overflow}); delete $hash->{HELPER}{LASTLIMITRUNTIME}; $success = 1; } - + return $success; } @@ -2531,13 +2536,13 @@ return $success; # Reading state setzen ################################################################ sub DbLog_setReadingstate { - my $hash = shift; + my $hash = shift; my $val = shift // $hash->{HELPER}{OLDSTATE}; my $evt = ($val eq $hash->{HELPER}{OLDSTATE}) ? 
0 : 1; readingsSingleUpdate($hash, "state", $val, $evt); $hash->{HELPER}{OLDSTATE} = $val; - + return; } @@ -2563,24 +2568,24 @@ sub DbLog_PushAsync { my $current = $hash->{HELPER}{TC}; my $errorh = 0; my $error = 0; - my $doins = 0; # Hilfsvariable, wenn "1" sollen inserts in Tabelle current erfolgen (updates schlugen fehl) + my $doins = 0; # Hilfsvariable, wenn "1" sollen inserts in Tabelle current erfolgen (updates schlugen fehl) my $dbh; my $rowlback = 0; # Eventliste für Rückgabe wenn Fehler - + Log3 ($name, 5, "DbLog $name -> Start DbLog_PushAsync"); Log3 ($name, 5, "DbLog $name -> DbLogType is: $DbLogType"); - + # Background-Startzeit my $bst = [gettimeofday]; - + my ($useac,$useta) = DbLog_commitMode($hash); eval { if(!$useac) { $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); - } + } elsif($useac == 1) { $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); - } + } else { # Server default $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); @@ -2592,76 +2597,76 @@ sub DbLog_PushAsync { Log3 ($name, 5, "DbLog $name -> DbLog_PushAsync finished"); return "$name|$error|0|$rowlist"; } - + if($tl) { - # Tracelevel setzen - $dbh->{TraceLevel} = "$tl|$tf"; - } - + # Tracelevel setzen + $dbh->{TraceLevel} = "$tl|$tf"; + } + my $ac = ($dbh->{AutoCommit})?"ON":"OFF"; my $tm = ($useta)?"ON":"OFF"; Log3 $hash->{NAME}, 4, "DbLog $name -> AutoCommit mode: $ac, Transaction mode: $tm"; Log3 $hash->{NAME}, 4, "DbLog $name -> Insert mode: ".($bi?"Bulk":"Array"); - - # check ob PK verwendet wird, @usepkx?Anzahl der Felder im PK:0 wenn kein PK, $pkx?Namen der Felder:none wenn kein PK + + # check ob PK verwendet wird, @usepkx?Anzahl der Felder im PK:0 wenn 
kein PK, $pkx?Namen der Felder:none wenn kein PK my ($usepkh,$usepkc,$pkh,$pkc); if (!$supk) { ($usepkh,$usepkc,$pkh,$pkc) = DbLog_checkUsePK($hash,$dbh); - } + } else { Log3 $hash->{NAME}, 5, "DbLog $name -> Primary Key usage suppressed by attribute noSupportPK"; } - + my $rowldec = decode_base64($rowlist); my @row_array = split('§', $rowldec); my $ceti = $#row_array+1; - + my (@timestamp,@device,@type,@event,@reading,@value,@unit); my (@timestamp_cur,@device_cur,@type_cur,@event_cur,@reading_cur,@value_cur,@unit_cur); my ($st,$sth_ih,$sth_ic,$sth_uc,$sqlins); my ($tuples, $rows); - + no warnings 'uninitialized'; foreach my $row (@row_array) { - my @a = split("\\|",$row); + my @a = split("\\|",$row); s/_ESC_/\|/gxs for @a; # escaped Pipe return to "|" - push(@timestamp, "$a[0]"); - push(@device, "$a[1]"); - push(@type, "$a[2]"); - push(@event, "$a[3]"); - push(@reading, "$a[4]"); - push(@value, "$a[5]"); + push(@timestamp, "$a[0]"); + push(@device, "$a[1]"); + push(@type, "$a[2]"); + push(@event, "$a[3]"); + push(@reading, "$a[4]"); + push(@value, "$a[5]"); push(@unit, "$a[6]"); - Log3 $hash->{NAME}, 5, "DbLog $name -> processing event Timestamp: $a[0], Device: $a[1], Type: $a[2], Event: $a[3], Reading: $a[4], Value: $a[5], Unit: $a[6]"; - } - use warnings; - + Log3 $hash->{NAME}, 5, "DbLog $name -> processing event Timestamp: $a[0], Device: $a[1], Type: $a[2], Event: $a[3], Reading: $a[4], Value: $a[5], Unit: $a[6]"; + } + use warnings; + if($bi) { ####################### # Bulk-Insert ####################### $st = [gettimeofday]; # SQL-Startzeit - - if (lc($DbLogType) =~ m(history)) { - ######################################## + + if (lc($DbLogType) =~ m(history)) { + ######################################## # insert history mit/ohne primary key if ($usepkh && $hash->{MODEL} eq 'MYSQL') { $sqlins = "INSERT IGNORE INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES "; - } + } elsif ($usepkh && $hash->{MODEL} eq 'SQLITE') { $sqlins = 
"INSERT OR IGNORE INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES "; - } + } elsif ($usepkh && $hash->{MODEL} eq 'POSTGRESQL') { $sqlins = "INSERT INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES "; - } + } else { # ohne PK $sqlins = "INSERT INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES "; - } - no warnings 'uninitialized'; - + } + no warnings 'uninitialized'; + for my $row (@row_array) { - my @a = split("\\|",$row); + my @a = split("\\|",$row); s/_ESC_/\|/gxs for @a; # escaped Pipe return to "|" Log3 $hash->{NAME}, 5, "DbLog $name -> processing event Timestamp: $a[0], Device: $a[1], Type: $a[2], Event: $a[3], Reading: $a[4], Value: $a[5], Unit: $a[6]"; $a[3] =~ s/'/''/g; # escape ' with '' @@ -2671,75 +2676,75 @@ sub DbLog_PushAsync { $a[5] =~ s/\\/\\\\/g; # escape \ with \\ $a[6] =~ s/\\/\\\\/g; # escape \ with \\ $sqlins .= "('$a[0]','$a[1]','$a[2]','$a[3]','$a[4]','$a[5]','$a[6]'),"; - } - + } + use warnings; - + chop($sqlins); - + if ($usepkh && $hash->{MODEL} eq 'POSTGRESQL') { $sqlins .= " ON CONFLICT DO NOTHING"; } - + eval { $dbh->begin_work() if($useta && $dbh->{AutoCommit}); }; # Transaktion wenn gewünscht und autocommit ein if ($@) { Log3($name, 2, "DbLog $name -> Error start transaction for $history - $@"); } - + eval { $sth_ih = $dbh->prepare($sqlins); - if($tl) { # Tracelevel setzen + if($tl) { # Tracelevel setzen $sth_ih->{TraceLevel} = "$tl|$tf"; - } + } my $ins_hist = $sth_ih->execute(); $ins_hist = 0 if($ins_hist eq "0E0"); - + if($ins_hist == $ceti) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ins_hist of $ceti events inserted into table $history".($usepkh?" using PK on columns $pkh":""); - } + } else { if($usepkh) { - Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".$ins_hist." of $ceti events inserted into table $history due to PK on columns $pkh"; - } - else { - Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".$ins_hist." 
of $ceti events inserted into table $history"; + Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".$ins_hist." of $ceti events inserted into table $history due to PK on columns $pkh"; } - } + else { + Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".$ins_hist." of $ceti events inserted into table $history"; + } + } eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # Data commit if ($@) { Log3($name, 2, "DbLog $name -> Error commit $history - $@"); - } + } else { if(!$dbh->{AutoCommit}) { Log3($name, 4, "DbLog $name -> insert table $history committed"); - } + } else { Log3($name, 4, "DbLog $name -> insert table $history committed by autocommit"); } - } + } }; - + if ($@) { $errorh = $@; Log3 $hash->{NAME}, 2, "DbLog $name -> Error table $history - $errorh"; $error = encode_base64($errorh,""); $rowlback = $rowlist if($useta); # nicht gespeicherte Datensätze nur zurück geben wenn Transaktion ein } - } + } if (lc($DbLogType) =~ m(current)) { ################################################################# - # insert current mit/ohne primary key - # Array-Insert wird auch bei Bulk verwendet weil im Bulk-Mode - # die nicht upgedateten Sätze nicht identifiziert werden können + # insert current mit/ohne primary key + # Array-Insert wird auch bei Bulk verwendet weil im Bulk-Mode + # die nicht upgedateten Sätze nicht identifiziert werden können if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; + } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ic = $dbh->prepare("INSERT OR IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { eval { $sth_ic = $dbh->prepare("INSERT 
INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?) ON CONFLICT DO NOTHING"); }; - } + } else { # ohne PK eval { $sth_ic = $dbh->prepare("INSERT INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; @@ -2751,28 +2756,28 @@ sub DbLog_PushAsync { $dbh->disconnect(); return "$name|$error|0|"; } - + if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - $sth_uc = $dbh->prepare("REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); - } - elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { + $sth_uc = $dbh->prepare("REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); + } + elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { $sth_uc = $dbh->prepare("INSERT OR REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); - } - elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { - $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) - DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, + } + elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { + $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) + DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, VALUE=EXCLUDED.VALUE, UNIT=EXCLUDED.UNIT"); - } - else { + } + else { $sth_uc = $dbh->prepare("UPDATE $current SET TIMESTAMP=?, TYPE=?, EVENT=?, VALUE=?, UNIT=? WHERE (DEVICE=?) 
AND (READING=?)"); } - + if($tl) { - # Tracelevel setzen + # Tracelevel setzen $sth_uc->{TraceLevel} = "$tl|$tf"; $sth_ic->{TraceLevel} = "$tl|$tf"; } - + $sth_uc->bind_param_array(1, [@timestamp]); $sth_uc->bind_param_array(2, [@type]); $sth_uc->bind_param_array(3, [@event]); @@ -2780,7 +2785,7 @@ sub DbLog_PushAsync { $sth_uc->bind_param_array(5, [@unit]); $sth_uc->bind_param_array(6, [@device]); $sth_uc->bind_param_array(7, [@reading]); - + eval { $dbh->begin_work() if($useta && $dbh->{AutoCommit}); }; # Transaktion wenn gewünscht und autocommit ein if ($@) { Log3($name, 2, "DbLog $name -> Error start transaction for $current - $@"); @@ -2793,23 +2798,23 @@ sub DbLog_PushAsync { $status = 0 if($status eq "0E0"); next if($status); # $status ist "1" wenn update ok Log3 $hash->{NAME}, 4, "DbLog $name -> Failed to update in $current, try to insert - TS: $timestamp[$tuple], Device: $device[$tuple], Reading: $reading[$tuple], Status = $status"; - push(@timestamp_cur, "$timestamp[$tuple]"); - push(@device_cur, "$device[$tuple]"); - push(@type_cur, "$type[$tuple]"); - push(@event_cur, "$event[$tuple]"); - push(@reading_cur, "$reading[$tuple]"); - push(@value_cur, "$value[$tuple]"); + push(@timestamp_cur, "$timestamp[$tuple]"); + push(@device_cur, "$device[$tuple]"); + push(@type_cur, "$type[$tuple]"); + push(@event_cur, "$event[$tuple]"); + push(@reading_cur, "$reading[$tuple]"); + push(@value_cur, "$value[$tuple]"); push(@unit_cur, "$unit[$tuple]"); $nupd_cur++; } if(!$nupd_cur) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ceti of $ceti events updated in table $current".($usepkc?" using PK on columns $pkc":""); - } + } else { Log3 $hash->{NAME}, 4, "DbLog $name -> $nupd_cur of $ceti events not updated and try to insert into table $current".($usepkc?" 
using PK on columns $pkc":""); $doins = 1; } - + if ($doins) { # events die nicht in Tabelle current updated wurden, werden in current neu eingefügt $sth_ic->bind_param_array(1, [@timestamp_cur]); @@ -2819,7 +2824,7 @@ sub DbLog_PushAsync { $sth_ic->bind_param_array(5, [@reading_cur]); $sth_ic->bind_param_array(6, [@value_cur]); $sth_ic->bind_param_array(7, [@unit_cur]); - + ($tuples, $rows) = $sth_ic->execute_array( { ArrayTupleStatus => \my @tuple_status } ); my $nins_cur = 0; for my $tuple (0..$#device_cur) { @@ -2831,7 +2836,7 @@ sub DbLog_PushAsync { } if(!$nins_cur) { Log3 $hash->{NAME}, 4, "DbLog $name -> ".($#device_cur+1)." of ".($#device_cur+1)." events inserted into table $current ".($usepkc?" using PK on columns $pkc":""); - } + } else { Log3 $hash->{NAME}, 4, "DbLog $name -> ".($#device_cur+1-$nins_cur)." of ".($#device_cur+1)." events inserted into table $current".($usepkc?" using PK on columns $pkc":""); } @@ -2839,37 +2844,37 @@ sub DbLog_PushAsync { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # issue Turning on AutoCommit failed if ($@) { Log3($name, 2, "DbLog $name -> Error commit table $current - $@"); - } + } else { if(!$dbh->{AutoCommit}) { Log3($name, 4, "DbLog $name -> insert / update table $current committed"); - } + } else { Log3($name, 4, "DbLog $name -> insert / update table $current committed by autocommit"); } } - }; - } - } + }; + } + } else { ####################### # Array-Insert - ####################### - - $st = [gettimeofday]; # SQL-Startzeit - + ####################### + + $st = [gettimeofday]; # SQL-Startzeit + if (lc($DbLogType) =~ m(history)) { - ######################################## + ######################################## # insert history mit/ohne primary key if ($usepkh && $hash->{MODEL} eq 'MYSQL') { eval { $sth_ih = $dbh->prepare("INSERT IGNORE INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + } elsif ($usepkh && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ih = 
$dbh->prepare("INSERT OR IGNORE INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + } elsif ($usepkh && $hash->{MODEL} eq 'POSTGRESQL') { eval { $sth_ih = $dbh->prepare("INSERT INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?) ON CONFLICT DO NOTHING"); }; - } + } else { # ohne PK eval { $sth_ih = $dbh->prepare("INSERT INTO $history (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; @@ -2884,17 +2889,17 @@ sub DbLog_PushAsync { } if($tl) { - # Tracelevel setzen + # Tracelevel setzen $sth_ih->{TraceLevel} = "$tl|$tf"; - } - + } + $sth_ih->bind_param_array(1, [@timestamp]); $sth_ih->bind_param_array(2, [@device]); $sth_ih->bind_param_array(3, [@type]); $sth_ih->bind_param_array(4, [@event]); $sth_ih->bind_param_array(5, [@reading]); $sth_ih->bind_param_array(6, [@value]); - $sth_ih->bind_param_array(7, [@unit]); + $sth_ih->bind_param_array(7, [@unit]); eval { $dbh->begin_work() if($useta && $dbh->{AutoCommit}); }; # Transaktion wenn gewünscht und autocommit ein if ($@) { @@ -2907,7 +2912,7 @@ sub DbLog_PushAsync { for my $tuple (0..$#row_array) { my $status = $tuple_status[$tuple]; $status = 0 if($status eq "0E0"); - next if($status); # $status ist "1" wenn insert ok + next if($status); # $status ist "1" wenn insert ok Log3 $hash->{NAME}, 3, "DbLog $name -> Insert into $history rejected".($usepkh?" (possible PK violation) ":" ")."- TS: $timestamp[$tuple], Device: $device[$tuple], Event: $event[$tuple]"; my $nlh = ($timestamp[$tuple]."|".$device[$tuple]."|".$type[$tuple]."|".$event[$tuple]."|".$reading[$tuple]."|".$value[$tuple]."|".$unit[$tuple]); push(@n2hist, "$nlh"); @@ -2915,52 +2920,52 @@ sub DbLog_PushAsync { } if(!$nins_hist) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ceti of $ceti events inserted into table $history".($usepkh?" 
using PK on columns $pkh":""); - } + } else { if($usepkh) { - Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".($ceti-$nins_hist)." of $ceti events inserted into table history due to PK on columns $pkh"; - } + Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".($ceti-$nins_hist)." of $ceti events inserted into table history due to PK on columns $pkh"; + } else { - Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".($ceti-$nins_hist)." of $ceti events inserted into table $history"; + Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".($ceti-$nins_hist)." of $ceti events inserted into table $history"; } s/\|/_ESC_/gxs for @n2hist; # escape Pipe "|" $rowlist = join('§', @n2hist); - $rowlist = encode_base64($rowlist,""); + $rowlist = encode_base64($rowlist,""); } eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # Data commit if ($@) { Log3($name, 2, "DbLog $name -> Error commit $history - $@"); - } + } else { if(!$dbh->{AutoCommit}) { Log3($name, 4, "DbLog $name -> insert table $history committed"); - } + } else { Log3($name, 4, "DbLog $name -> insert table $history committed by autocommit"); } } }; - + if ($@) { $errorh = $@; Log3 $hash->{NAME}, 2, "DbLog $name -> Error table $history - $errorh"; $error = encode_base64($errorh,""); $rowlback = $rowlist if($useta); # nicht gespeicherte Datensätze nur zurück geben wenn Transaktion ein - } - } - + } + } + if (lc($DbLogType) =~ m(current)) { ######################################## - # insert current mit/ohne primary key + # insert current mit/ohne primary key if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; + } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ic = $dbh->prepare("INSERT OR IGNORE INTO $current 
(TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; - } + } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { eval { $sth_ic = $dbh->prepare("INSERT INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?) ON CONFLICT DO NOTHING"); }; - } + } else { # ohne PK eval { $sth_ic = $dbh->prepare("INSERT INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; @@ -2973,26 +2978,26 @@ sub DbLog_PushAsync { $dbh->disconnect(); return "$name|$error|0|$rowlist"; } - + if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - $sth_uc = $dbh->prepare("REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); - } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { + $sth_uc = $dbh->prepare("REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); + } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { $sth_uc = $dbh->prepare("INSERT OR REPLACE INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?)"); - } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { - $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) - DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, + } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { + $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) + DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, VALUE=EXCLUDED.VALUE, UNIT=EXCLUDED.UNIT"); - } - else { + } + else { $sth_uc = $dbh->prepare("UPDATE $current SET TIMESTAMP=?, TYPE=?, EVENT=?, VALUE=?, UNIT=? WHERE (DEVICE=?) 
AND (READING=?)"); } - + if($tl) { - # Tracelevel setzen + # Tracelevel setzen $sth_uc->{TraceLevel} = "$tl|$tf"; $sth_ic->{TraceLevel} = "$tl|$tf"; } - + $sth_uc->bind_param_array(1, [@timestamp]); $sth_uc->bind_param_array(2, [@type]); $sth_uc->bind_param_array(3, [@event]); @@ -3000,7 +3005,7 @@ sub DbLog_PushAsync { $sth_uc->bind_param_array(5, [@unit]); $sth_uc->bind_param_array(6, [@device]); $sth_uc->bind_param_array(7, [@reading]); - + eval { $dbh->begin_work() if($useta && $dbh->{AutoCommit}); }; # Transaktion wenn gewünscht und autocommit ein if ($@) { Log3($name, 2, "DbLog $name -> Error start transaction for $current - $@"); @@ -3013,23 +3018,23 @@ sub DbLog_PushAsync { $status = 0 if($status eq "0E0"); next if($status); # $status ist "1" wenn update ok Log3 $hash->{NAME}, 4, "DbLog $name -> Failed to update in $current, try to insert - TS: $timestamp[$tuple], Device: $device[$tuple], Reading: $reading[$tuple], Status = $status"; - push(@timestamp_cur, "$timestamp[$tuple]"); - push(@device_cur, "$device[$tuple]"); - push(@type_cur, "$type[$tuple]"); - push(@event_cur, "$event[$tuple]"); - push(@reading_cur, "$reading[$tuple]"); - push(@value_cur, "$value[$tuple]"); + push(@timestamp_cur, "$timestamp[$tuple]"); + push(@device_cur, "$device[$tuple]"); + push(@type_cur, "$type[$tuple]"); + push(@event_cur, "$event[$tuple]"); + push(@reading_cur, "$reading[$tuple]"); + push(@value_cur, "$value[$tuple]"); push(@unit_cur, "$unit[$tuple]"); $nupd_cur++; } if(!$nupd_cur) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ceti of $ceti events updated in table $current".($usepkc?" using PK on columns $pkc":""); - } + } else { Log3 $hash->{NAME}, 4, "DbLog $name -> $nupd_cur of $ceti events not updated and try to insert into table $current".($usepkc?" 
using PK on columns $pkc":""); $doins = 1; } - + if ($doins) { # events die nicht in Tabelle current updated wurden, werden in current neu eingefügt $sth_ic->bind_param_array(1, [@timestamp_cur]); @@ -3039,7 +3044,7 @@ sub DbLog_PushAsync { $sth_ic->bind_param_array(5, [@reading_cur]); $sth_ic->bind_param_array(6, [@value_cur]); $sth_ic->bind_param_array(7, [@unit_cur]); - + ($tuples, $rows) = $sth_ic->execute_array( { ArrayTupleStatus => \my @tuple_status } ); my $nins_cur = 0; for my $tuple (0..$#device_cur) { @@ -3051,7 +3056,7 @@ sub DbLog_PushAsync { } if(!$nins_cur) { Log3 $hash->{NAME}, 4, "DbLog $name -> ".($#device_cur+1)." of ".($#device_cur+1)." events inserted into table $current ".($usepkc?" using PK on columns $pkc":""); - } + } else { Log3 $hash->{NAME}, 4, "DbLog $name -> ".($#device_cur+1-$nins_cur)." of ".($#device_cur+1)." events inserted into table $current".($usepkc?" using PK on columns $pkc":""); } @@ -3059,31 +3064,31 @@ sub DbLog_PushAsync { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # issue Turning on AutoCommit failed if ($@) { Log3($name, 2, "DbLog $name -> Error commit table $current - $@"); - } + } else { if(!$dbh->{AutoCommit}) { Log3($name, 4, "DbLog $name -> insert / update table $current committed"); - } + } else { Log3($name, 4, "DbLog $name -> insert / update table $current committed by autocommit"); } } - }; + }; } } - + $dbh->disconnect(); - + # SQL-Laufzeit ermitteln my $rt = tv_interval($st); - + Log3 ($name, 5, "DbLog $name -> DbLog_PushAsync finished"); # Background-Laufzeit ermitteln my $brt = tv_interval($bst); $rt = $rt.",".$brt; - + return "$name|$error|$rt|$rowlback"; } @@ -3102,36 +3107,36 @@ sub DbLog_PushAsyncDone { my $memcount; Log3 ($name, 5, "DbLog $name -> Start DbLog_PushAsyncDone"); - + if($rowlist) { $rowlist = decode_base64($rowlist); my @row_array = split('§', $rowlist); - + #one Transaction - eval { + eval { for my $row (@row_array) { # Cache & CacheIndex für Events zum asynchronen Schreiben in DB 
$data{DbLog}{$name}{cache}{index}++; my $index = $data{DbLog}{$name}{cache}{index}; $data{DbLog}{$name}{cache}{memcache}{$index} = $row; } $memcount = scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}); - }; + }; } $memcount = $data{DbLog}{$name}{cache}{memcache}?scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}):0; readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); - + if(AttrVal($name, "showproctime", undef) && $bt) { my ($rt,$brt) = split(",", $bt); readingsBeginUpdate($hash); - readingsBulkUpdate($hash, "background_processing_time", sprintf("%.4f",$brt)); + readingsBulkUpdate($hash, "background_processing_time", sprintf("%.4f",$brt)); readingsBulkUpdate($hash, "sql_processing_time", sprintf("%.4f",$rt)); readingsEndUpdate($hash, 1); } - + my $state = $error ? $error : (IsDisabled($name)) ? "disabled" : "connected"; - DbLog_setReadingstate ($hash, $state); - + DbLog_setReadingstate ($hash, $state); + if(!$asyncmode) { delete($defs{$name}{READINGS}{NextSync}); delete($defs{$name}{READINGS}{background_processing_time}); @@ -3140,14 +3145,14 @@ sub DbLog_PushAsyncDone { } delete $hash->{HELPER}{".RUNNING_PID"}; delete $hash->{HELPER}{LASTLIMITRUNTIME} if(!$error); - Log3 ($name, 5, "DbLog $name -> DbLog_PushAsyncDone finished"); - + Log3 ($name, 5, "DbLog $name -> DbLog_PushAsyncDone finished"); + Log3 ($name, 2, "DbLog $name - Last database write cycle done") if(delete $hash->{HELPER}{SHUTDOWNSEQ}); CancelDelayedShutdown($name); - + return; } - + ############################################################################################# # Abbruchroutine Timeout non-blocking asynchron DbLog_PushAsync ############################################################################################# @@ -3155,16 +3160,16 @@ sub DbLog_PushAsyncAborted { my ($hash,$cause) = @_; my $name = $hash->{NAME}; $cause = $cause?$cause:"Timeout: process terminated"; - + Log3 ($name, 2, "DbLog $name -> ".$hash->{HELPER}{".RUNNING_PID"}{fn}." 
".$cause) if(!$hash->{HELPER}{SHUTDOWNSEQ}); - DbLog_setReadingstate ($hash, $cause); - + DbLog_setReadingstate ($hash, $cause); + delete $hash->{HELPER}{".RUNNING_PID"}; delete $hash->{HELPER}{LASTLIMITRUNTIME}; - + Log3 ($name, 2, "DbLog $name - Last database write cycle done") if(delete $hash->{HELPER}{SHUTDOWNSEQ}); CancelDelayedShutdown($name); - + return; } @@ -3178,12 +3183,12 @@ return; sub DbLog_explode_datetime { my ($t, %def) = @_; my %retv; - + my (@datetime, @date, @time); @datetime = split(" ", $t); #Datum und Zeit auftrennen @date = split("-", $datetime[0]); @time = split(":", $datetime[1]) if ($datetime[1]); - + if ($date[0]) {$retv{year} = $date[0];} else {$retv{year} = $def{year};} if ($date[1]) {$retv{month} = $date[1];} else {$retv{month} = $def{month};} if ($date[2]) {$retv{day} = $date[2];} else {$retv{day} = $def{day};} @@ -3192,7 +3197,7 @@ sub DbLog_explode_datetime { if ($time[2]) {$retv{second} = $time[2];} else {$retv{second} = $def{second};} $retv{datetime} = DbLog_implode_datetime($retv{year}, $retv{month}, $retv{day}, $retv{hour}, $retv{minute}, $retv{second}); - + # Log 1, Dumper(%retv); return %retv } @@ -3217,7 +3222,7 @@ sub DbLog_readCfg { # use generic fileRead to get configuration data my ($err, @config) = FileRead($configfilename); return $err if($err); - + eval join("\n", @config); return "could not read connection" if (!defined $dbconfig{connection}); @@ -3230,82 +3235,82 @@ sub DbLog_readCfg { #check the database model if($hash->{dbconn} =~ m/pg:/i) { $hash->{MODEL}="POSTGRESQL"; - } + } elsif ($hash->{dbconn} =~ m/mysql:/i) { $hash->{MODEL}="MYSQL"; - } + } elsif ($hash->{dbconn} =~ m/oracle:/i) { $hash->{MODEL}="ORACLE"; - } + } elsif ($hash->{dbconn} =~ m/sqlite:/i) { $hash->{MODEL}="SQLITE"; - } + } else { $hash->{MODEL}="unknown"; Log3 $hash->{NAME}, 1, "Unknown database model found in configuration file $configfilename."; Log3 $hash->{NAME}, 1, "Only MySQL/MariaDB, PostgreSQL, Oracle, SQLite are fully supported."; 
return "unknown database type"; } - + if($hash->{MODEL} eq "MYSQL") { $hash->{UTF8} = defined($dbconfig{utf8})?$dbconfig{utf8}:0; } - + return; } sub DbLog_ConnectPush { - # own $dbhp for synchronous logging and dblog_get + # own $dbhp for synchronous logging and dblog_get my ($hash,$get) = @_; my $name = $hash->{NAME}; my $dbconn = $hash->{dbconn}; my $dbuser = $hash->{dbuser}; my $dbpassword = $attr{"sec$name"}{secret}; my $utf8 = defined($hash->{UTF8})?$hash->{UTF8}:0; - + my ($dbhp,$state,$evt,$err); - + return 0 if(IsDisabled($name)); - + if($init_done != 1) { InternalTimer(gettimeofday()+5, "DbLog_ConnectPush", $hash, 0); return; } - + Log3 $hash->{NAME}, 3, "DbLog $name - Creating Push-Handle to database $dbconn with user $dbuser" if(!$get); my ($useac,$useta) = DbLog_commitMode($hash); eval { if(!$useac) { $dbhp = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); - } + } elsif($useac == 1) { $dbhp = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); - } + } else { # Server default $dbhp = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } - }; - + }; + if($@) { $err = $@; Log3 $hash->{NAME}, 2, "DbLog $name - Error: $@"; } - + if(!$dbhp) { RemoveInternalTimer($hash, "DbLog_ConnectPush"); Log3 $hash->{NAME}, 4, "DbLog $name - Trying to connect to database"; - + $state = $err ? $err : (IsDisabled($name)) ? 
"disabled" : "disconnected"; - DbLog_setReadingstate ($hash, $state); - + DbLog_setReadingstate ($hash, $state); + InternalTimer(gettimeofday()+5, 'DbLog_ConnectPush', $hash, 0); Log3 $hash->{NAME}, 4, "DbLog $name - Waiting for database connection"; return 0; } - - $dbhp->{RaiseError} = 0; + + $dbhp->{RaiseError} = 0; $dbhp->{PrintError} = 1; Log3 $hash->{NAME}, 3, "DbLog $name - Push-Handle to db $dbconn created" if(!$get); @@ -3316,13 +3321,13 @@ sub DbLog_ConnectPush { } $hash->{DBHP} = $dbhp; - + if ($hash->{MODEL} eq "SQLITE") { $dbhp->do("PRAGMA temp_store=MEMORY"); - $dbhp->do("PRAGMA synchronous=FULL"); # For maximum reliability and for robustness against database corruption, + $dbhp->do("PRAGMA synchronous=FULL"); # For maximum reliability and for robustness against database corruption, # SQLite should always be run with its default synchronous setting of FULL. # https://sqlite.org/howtocorrupt.html - + if (AttrVal($name, "SQLiteJournalMode", "WAL") eq "off") { $dbhp->do("PRAGMA journal_mode=off"); $hash->{SQLITEWALMODE} = "off"; @@ -3331,12 +3336,12 @@ sub DbLog_ConnectPush { $dbhp->do("PRAGMA journal_mode=WAL"); $hash->{SQLITEWALMODE} = "on"; } - + my $cs = AttrVal($name, "SQLiteCacheSize", "4000"); $dbhp->do("PRAGMA cache_size=$cs"); $hash->{SQLITECACHESIZE} = $cs; } - + return 1; } @@ -3349,50 +3354,50 @@ sub DbLog_ConnectNewDBH { my $dbpassword = $attr{"sec$name"}{secret}; my $utf8 = defined($hash->{UTF8})?$hash->{UTF8}:0; my $dbh; - + my ($useac,$useta) = DbLog_commitMode($hash); eval { if(!$useac) { $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); - } + } elsif($useac == 1) { $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); - } + } else { # Server default $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { 
PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); - } + } }; - + if($@) { Log3($name, 2, "DbLog $name - $@"); my $state = $@ ? $@ : (IsDisabled($name)) ? "disabled" : "disconnected"; DbLog_setReadingstate ($hash, $state); } - + if($dbh) { - $dbh->{RaiseError} = 0; + $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; - + if ($hash->{MODEL} eq "SQLITE") { # Forum: https://forum.fhem.de/index.php/topic,120237.0.html $dbh->do("PRAGMA temp_store=MEMORY"); - $dbh->do("PRAGMA synchronous=FULL"); # For maximum reliability and for robustness against database corruption, + $dbh->do("PRAGMA synchronous=FULL"); # For maximum reliability and for robustness against database corruption, # SQLite should always be run with its default synchronous setting of FULL. # https://sqlite.org/howtocorrupt.html - + if (AttrVal($name, "SQLiteJournalMode", "WAL") eq "off") { $dbh->do("PRAGMA journal_mode=off"); } else { $dbh->do("PRAGMA journal_mode=WAL"); } - + my $cs = AttrVal($name, "SQLiteCacheSize", "4000"); $dbh->do("PRAGMA cache_size=$cs"); } - + return $dbh; - } + } else { return 0; } @@ -3410,9 +3415,9 @@ sub DbLog_ExecSQL { my ($hash,$sql) = @_; my $name = $hash->{NAME}; my $dbh = DbLog_ConnectNewDBH($hash); - + Log3($name, 4, "DbLog $name - Backdoor executing: $sql"); - + return if(!$dbh); my $sth = DbLog_ExecSQL1($hash,$dbh,$sql); if(!$sth) { @@ -3420,7 +3425,7 @@ sub DbLog_ExecSQL { $dbh->disconnect(); $dbh = DbLog_ConnectNewDBH($hash); return if(!$dbh); - + Log3($name, 2, "DbLog $name - Backdoor retry: $sql"); $sth = DbLog_ExecSQL1($hash,$dbh,$sql); if(!$sth) { @@ -3439,17 +3444,17 @@ return $sth; sub DbLog_ExecSQL1 { my ($hash,$dbh,$sql)= @_; my $name = $hash->{NAME}; - - $dbh->{RaiseError} = 1; + + $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; - + my $sth; eval { $sth = $dbh->do($sql); }; if($@) { Log3($name, 2, "DbLog $name - ERROR: $@"); return 0; } - + return $sth; } @@ -3468,7 +3473,7 @@ sub DbLog_Get { my $history = 
$hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; my ($dbh,$err); - + return DbLog_dbReadings($hash,@a) if $a[1] =~ m/^Readings/; return "Usage: get $a[0] ...\n". @@ -3477,7 +3482,7 @@ sub DbLog_Get { " is not used, only for compatibility for FileLog, please use - \n" . " is a prefix, - means stdout\n" if(int(@a) < 5); - + shift @a; my $inf = lc(shift @a); my $outf = lc(shift @a); # Wert ALL: get all colums from table, including a header @@ -3507,7 +3512,7 @@ sub DbLog_Get { } ######################## - # getter für SVG + # getter für SVG ######################## my @readings = (); my (%sqlspec, %from_datetime, %to_datetime); @@ -3526,13 +3531,13 @@ sub DbLog_Get { Log3($name, 1, "DbLog $name - wrong date/time format (from: $from) requested by SVG: $err"); return; } - + $err = DbLog_checkTimeformat($to); # Forum: https://forum.fhem.de/index.php/topic,101005.0.html if($err) { Log3($name, 1, "DbLog $name - wrong date/time format (to: $to) requested by SVG: $err"); return; } - + if($to =~ /(\d{4})-(\d{2})-(\d{2}) 23:59:59/) { # 03.09.2018 : https://forum.fhem.de/index.php/topic,65860.msg815640.html#msg815640 $to =~ /(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})/; @@ -3564,28 +3569,28 @@ sub DbLog_Get { Log3 $name, 4, "DbLog $name -> ### new get data for SVG ###"; Log3 $name, 4, "DbLog $name -> ################################################################"; Log3($name, 4, "DbLog $name -> main PID: $hash->{PID}, secondary PID: $$"); - + my $nh = ($hash->{MODEL} ne 'SQLITE') ? 1 : 0; if ($nh || $hash->{PID} != $$) { # 17.04.2019 Forum: https://forum.fhem.de/index.php/topic,99719.0.html $dbh = DbLog_ConnectNewDBH($hash); return "Can't connect to database." if(!$dbh); - } + } else { $dbh = $hash->{DBHP}; eval { if ( !$dbh || not $dbh->ping ) { # DB Session dead, try to reopen now ! DbLog_ConnectPush($hash,1); - } + } }; if ($@) { Log3($name, 1, "DbLog $name: DBLog_Push - DB Session dead! 
- $@"); return $@; - } + } else { $dbh = $hash->{DBHP}; } - } + } # vorbereiten der DB-Abfrage, DB-Modell-abhaengig if ($hash->{MODEL} eq "POSTGRESQL") { @@ -3596,7 +3601,7 @@ sub DbLog_Get { $sqlspec{order_by_hour} = "TO_CHAR(TIMESTAMP, 'YYYY-MM-DD HH24')"; $sqlspec{max_value} = "MAX(VALUE)"; $sqlspec{day_before} = "($sqlspec{from_timestamp} - INTERVAL '1 DAY')"; - } + } elsif ($hash->{MODEL} eq "ORACLE") { $sqlspec{get_timestamp} = "TO_CHAR(TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS')"; $sqlspec{from_timestamp} = "TO_TIMESTAMP('$from', 'YYYY-MM-DD HH24:MI:SS')"; @@ -3604,7 +3609,7 @@ sub DbLog_Get { $sqlspec{order_by_hour} = "TO_CHAR(TIMESTAMP, 'YYYY-MM-DD HH24')"; $sqlspec{max_value} = "MAX(VALUE)"; $sqlspec{day_before} = "DATE_SUB($sqlspec{from_timestamp},INTERVAL 1 DAY)"; - } + } elsif ($hash->{MODEL} eq "MYSQL") { $sqlspec{get_timestamp} = "DATE_FORMAT(TIMESTAMP, '%Y-%m-%d %H:%i:%s')"; $sqlspec{from_timestamp} = "STR_TO_DATE('$from', '%Y-%m-%d %H:%i:%s')"; @@ -3612,7 +3617,7 @@ sub DbLog_Get { $sqlspec{order_by_hour} = "DATE_FORMAT(TIMESTAMP, '%Y-%m-%d %H')"; $sqlspec{max_value} = "MAX(VALUE)"; # 12.04.2019 Forum: https://forum.fhem.de/index.php/topic,99280.0.html $sqlspec{day_before} = "DATE_SUB($sqlspec{from_timestamp},INTERVAL 1 DAY)"; - } + } elsif ($hash->{MODEL} eq "SQLITE") { $sqlspec{get_timestamp} = "TIMESTAMP"; $sqlspec{from_timestamp} = "'$from'"; @@ -3620,7 +3625,7 @@ sub DbLog_Get { $sqlspec{order_by_hour} = "strftime('%Y-%m-%d %H', TIMESTAMP)"; $sqlspec{max_value} = "MAX(VALUE)"; $sqlspec{day_before} = "date($sqlspec{from_timestamp},'-1 day')"; - } + } else { $sqlspec{get_timestamp} = "TIMESTAMP"; $sqlspec{from_timestamp} = "'$from'"; @@ -3633,7 +3638,7 @@ sub DbLog_Get { if($outf =~ m/(all|array)/) { $sqlspec{all} = ",TYPE,EVENT,UNIT"; $sqlspec{all_max} = ",MAX(TYPE) AS TYPE,MAX(EVENT) AS EVENT,MAX(UNIT) AS UNIT"; - } + } else { $sqlspec{all} = ""; $sqlspec{all_max} = ""; @@ -3649,7 +3654,7 @@ sub DbLog_Get { $firstv[$i] = 0; $firstd[$i] = "undef"; 
$lastv[$i] = 0; - $lastd[$i] = "undef"; + $lastd[$i] = "undef"; $mind[$i] = "undef"; $maxd[$i] = "undef"; $minval = (~0 >> 1); # ist "9223372036854775807" @@ -3666,17 +3671,17 @@ sub DbLog_Get { if($deltacalc) { # delta-h und delta-d , geändert V4.8.0 / 14.10.2019 $stm = "SELECT Z.TIMESTAMP, Z.DEVICE, Z.READING, Z.VALUE from "; - + $stm .= "(SELECT $sqlspec{get_timestamp} AS TIMESTAMP, DEVICE AS DEVICE, READING AS READING, VALUE AS VALUE "; - + $stm .= "FROM $current " if($inf eq "current"); $stm .= "FROM $history " if($inf eq "history"); - $stm .= "WHERE 1=1 "; - + $stm .= "WHERE 1=1 "; + $stm .= "AND DEVICE = '".$readings[$i]->[0]."' " if ($readings[$i]->[0] !~ m(\%)); $stm .= "AND DEVICE LIKE '".$readings[$i]->[0]."' " if(($readings[$i]->[0] !~ m(^\%$)) && ($readings[$i]->[0] =~ m(\%))); @@ -3684,14 +3689,14 @@ sub DbLog_Get { $stm .= "AND READING LIKE '".$readings[$i]->[1]."' " if(($readings[$i]->[1] !~ m(^%$)) && ($readings[$i]->[1] =~ m(\%))); $stm .= "AND TIMESTAMP < $sqlspec{from_timestamp} "; - $stm .= "AND TIMESTAMP > $sqlspec{day_before} "; - - $stm .= "ORDER BY TIMESTAMP DESC LIMIT 1 ) AS Z + $stm .= "AND TIMESTAMP > $sqlspec{day_before} "; + + $stm .= "ORDER BY TIMESTAMP DESC LIMIT 1 ) AS Z UNION ALL " if($readings[$i]->[3] eq "delta-h"); - - $stm .= "ORDER BY TIMESTAMP) AS Z + + $stm .= "ORDER BY TIMESTAMP) AS Z UNION ALL " if($readings[$i]->[3] eq "delta-d"); - + $stm .= "SELECT MAX($sqlspec{get_timestamp}) AS TIMESTAMP, MAX(DEVICE) AS DEVICE, @@ -3703,19 +3708,19 @@ sub DbLog_Get { $stm .= "FROM $history " if($inf eq "history"); $stm .= "WHERE 1=1 "; - + $stm .= "AND DEVICE = '".$readings[$i]->[0]."' " if ($readings[$i]->[0] !~ m(\%)); $stm .= "AND DEVICE LIKE '".$readings[$i]->[0]."' " if(($readings[$i]->[0] !~ m(^\%$)) && ($readings[$i]->[0] =~ m(\%))); $stm .= "AND READING = '".$readings[$i]->[1]."' " if ($readings[$i]->[1] !~ m(\%)); $stm .= "AND READING LIKE '".$readings[$i]->[1]."' " if(($readings[$i]->[1] !~ m(^%$)) && ($readings[$i]->[1] =~ 
m(\%))); - + $stm .= "AND TIMESTAMP >= $sqlspec{from_timestamp} "; - $stm .= "AND TIMESTAMP <= $sqlspec{to_timestamp} "; # 03.09.2018 : https://forum.fhem.de/index.php/topic,65860.msg815640.html#msg815640 + $stm .= "AND TIMESTAMP <= $sqlspec{to_timestamp} "; # 03.09.2018 : https://forum.fhem.de/index.php/topic,65860.msg815640.html#msg815640 $stm .= "GROUP BY $sqlspec{order_by_hour} " if($deltacalc); $stm .= "ORDER BY TIMESTAMP"; - } + } else { # kein deltacalc $stm = "SELECT @@ -3737,7 +3742,7 @@ sub DbLog_Get { $stm .= "AND READING LIKE '".$readings[$i]->[1]."' " if(($readings[$i]->[1] !~ m(^%$)) && ($readings[$i]->[1] =~ m(\%))); $stm .= "AND TIMESTAMP >= $sqlspec{from_timestamp} "; - $stm .= "AND TIMESTAMP <= $sqlspec{to_timestamp} "; # 03.09.2018 : https://forum.fhem.de/index.php/topic,65860.msg815640.html#msg815640 + $stm .= "AND TIMESTAMP <= $sqlspec{to_timestamp} "; # 03.09.2018 : https://forum.fhem.de/index.php/topic,65860.msg815640.html#msg815640 $stm .= "ORDER BY TIMESTAMP"; } @@ -3748,7 +3753,7 @@ sub DbLog_Get { if($outf =~ m/(all|array)/) { $sth->bind_columns(undef, \$sql_timestamp, \$sql_device, \$sql_reading, \$sql_value, \$type, \$event, \$unit); - } + } else { $sth->bind_columns(undef, \$sql_timestamp, \$sql_device, \$sql_reading, \$sql_value); } @@ -3757,9 +3762,9 @@ sub DbLog_Get { $retval .= "Timestamp: Device, Type, Event, Reading, Value, Unit\n"; $retval .= "=====================================================\n"; } - + #################################################################################### - # Select Auswertung + # Select Auswertung #################################################################################### my $rv = 0; while($sth->fetch()) { @@ -3767,14 +3772,14 @@ sub DbLog_Get { no warnings 'uninitialized'; # geändert V4.8.0 / 14.10.2019 my $ds = "PID: $$, TS: $sql_timestamp, DEV: $sql_device, RD: $sql_reading, VAL: $sql_value"; # geändert V4.8.0 / 14.10.2019 Log3 ($name, 5, "$name - SQL-result -> $ds"); # geändert 
V4.8.0 / 14.10.2019 - use warnings; # geändert V4.8.0 / 14.10.2019 + use warnings; # geändert V4.8.0 / 14.10.2019 $writeout = 0; # eingefügt V4.8.0 / 14.10.2019 ############ Auswerten des 5. Parameters: Regexp ################### # die Regexep wird vor der Function ausgewertet und der Wert im Feld # Value angepasst. # z.B.: KS300:temperature KS300:rain::delta-h KS300:rain::delta-d - # 0 1 2 3 + # 0 1 2 3 # $readings[$i][0] = Device # $readings[$i][1] = Reading # $readings[$i][2] = Default @@ -3794,11 +3799,11 @@ sub DbLog_Get { if($sql_timestamp lt $from && $deltacalc) { if(Scalar::Util::looks_like_number($sql_value)) { # nur setzen wenn numerisch - $minval = $sql_value if($sql_value < $minval || ($minval = (~0 >> 1)) ); # geändert V4.8.0 / 14.10.2019 - $maxval = $sql_value if($sql_value > $maxval || ($maxval = -(~0 >> 1)) ); # geändert V4.8.0 / 14.10.2019 + $minval = $sql_value if($sql_value < $minval || ($minval = (~0 >> 1)) ); # geändert V4.8.0 / 14.10.2019 + $maxval = $sql_value if($sql_value > $maxval || ($maxval = -(~0 >> 1)) ); # geändert V4.8.0 / 14.10.2019 $lastv[$i] = $sql_value; } - } + } else { $writeout = 0; $out_value = ""; @@ -3815,39 +3820,39 @@ sub DbLog_Get { $out_value = $1 if($sql_value =~ m/^(\d+).*/o); $out_tstamp = $sql_timestamp; $writeout = 1; - } + } elsif ($readings[$i]->[3] && $readings[$i]->[3] =~ m/^int(\d+).*/o) { # Uebernehme den Dezimalwert mit den angegebenen Stellen an Nachkommastellen $out_value = $1 if($sql_value =~ m/^([-\.\d]+).*/o); $out_tstamp = $sql_timestamp; $writeout = 1; - } + } elsif ($readings[$i]->[3] && $readings[$i]->[3] eq "delta-ts" && lc($sql_value) !~ m(ignore)) { # Berechung der vergangen Sekunden seit dem letzten Logeintrag # zb. 
die Zeit zwischen on/off my @a = split("[- :]", $sql_timestamp); my $akt_ts = mktime($a[5],$a[4],$a[3],$a[2],$a[1]-1,$a[0]-1900,0,0,-1); - + if($lastd[$i] ne "undef") { @a = split("[- :]", $lastd[$i]); } - + my $last_ts = mktime($a[5],$a[4],$a[3],$a[2],$a[1]-1,$a[0]-1900,0,0,-1); $out_tstamp = $sql_timestamp; $out_value = sprintf("%02d", $akt_ts - $last_ts); - + if(lc($sql_value) =~ m(hide)) { $writeout = 0; - } + } else { $writeout = 1; } - } + } elsif ($readings[$i]->[3] && $readings[$i]->[3] eq "delta-h") { # Berechnung eines Delta-Stundenwertes %tstamp = DbLog_explode_datetime($sql_timestamp, ()); if($lastd[$i] eq "undef") { %lasttstamp = DbLog_explode_datetime($sql_timestamp, ()); $lasttstamp{hour} = "00"; - } + } else { %lasttstamp = DbLog_explode_datetime($lastd[$i], ()); } @@ -3857,7 +3862,7 @@ sub DbLog_Get { # Aenderung der Stunde, Berechne Delta # wenn die Stundendifferenz größer 1 ist muss ein Dummyeintrag erstellt werden $retvaldummy = ""; - + if(($tstamp{hour}-$lasttstamp{hour}) > 1) { for (my $j = $lasttstamp{hour}+1; $j < $tstamp{hour}; $j++) { $out_value = "0"; @@ -3868,17 +3873,17 @@ sub DbLog_Get { if ($outf =~ m/(all)/) { # Timestamp: Device, Type, Event, Reading, Value, Unit $retvaldummy .= sprintf("%s: %s, %s, %s, %s, %s, %s\n", $out_tstamp, $sql_device, $type, $event, $sql_reading, $out_value, $unit); - + } elsif ($outf =~ m/(array)/) { push(@ReturnArray, {"tstamp" => $out_tstamp, "device" => $sql_device, "type" => $type, "event" => $event, "reading" => $sql_reading, "value" => $out_value, "unit" => $unit}); - } + } else { $out_tstamp =~ s/\ /_/g; #needed by generating plots $retvaldummy .= "$out_tstamp $out_value\n"; } } } - + if(($tstamp{hour}-$lasttstamp{hour}) < 0) { for (my $j=0; $j < $tstamp{hour}; $j++) { $out_value = "0"; @@ -3886,81 +3891,81 @@ sub DbLog_Get { $hour = '0'.$j if $j<10; $cnt[$i]++; $out_tstamp = DbLog_implode_datetime($tstamp{year}, $tstamp{month}, $tstamp{day}, $hour, "30", "00"); - + if ($outf =~ m/(all)/) { # 
Timestamp: Device, Type, Event, Reading, Value, Unit $retvaldummy .= sprintf("%s: %s, %s, %s, %s, %s, %s\n", $out_tstamp, $sql_device, $type, $event, $sql_reading, $out_value, $unit); - } + } elsif ($outf =~ m/(array)/) { push(@ReturnArray, {"tstamp" => $out_tstamp, "device" => $sql_device, "type" => $type, "event" => $event, "reading" => $sql_reading, "value" => $out_value, "unit" => $unit}); - } + } else { $out_tstamp =~ s/\ /_/g; # needed by generating plots $retvaldummy .= "$out_tstamp $out_value\n"; } } } - + $writeout = 1 if($minval != (~0 >> 1) && $maxval != -(~0 >> 1)); # geändert V4.8.0 / 14.10.2019 $out_value = ($writeout == 1) ? sprintf("%g", $maxval - $minval) : 0; # if there was no previous reading in the selected time range, produce a null delta, %g - a floating-point number - + $sum[$i] += $out_value; $cnt[$i]++; $out_tstamp = DbLog_implode_datetime($lasttstamp{year}, $lasttstamp{month}, $lasttstamp{day}, $lasttstamp{hour}, "30", "00"); - + $minval = $maxval if($maxval != -(~0 >> 1)); # only use the current range's maximum as the new minimum if a proper value was found - + Log3 ($name, 5, "$name - Output delta-h -> TS: $tstamp{hour}, LASTTS: $lasttstamp{hour}, OUTTS: $out_tstamp, OUTVAL: $out_value, WRITEOUT: $writeout"); } - } + } elsif ($readings[$i]->[3] && $readings[$i]->[3] eq "delta-d") { # Berechnung eines Tages-Deltas %tstamp = DbLog_explode_datetime($sql_timestamp, ()); - + if($lastd[$i] eq "undef") { %lasttstamp = DbLog_explode_datetime($sql_timestamp, ()); - } + } else { %lasttstamp = DbLog_explode_datetime($lastd[$i], ()); } - + if("$tstamp{day}" ne "$lasttstamp{day}") { # Aenderung des Tages, berechne Delta $writeout = 1 if($minval != (~0 >> 1) && $maxval != -(~0 >> 1)); # geändert V4.8.0 / 14.10.2019 $out_value = ($writeout == 1) ? 
sprintf("%g", $maxval - $minval) : 0; # if there was no previous reading in the selected time range, produce a null delta, %g - a floating-point number $sum[$i] += $out_value; $cnt[$i]++; - + $out_tstamp = DbLog_implode_datetime($lasttstamp{year}, $lasttstamp{month}, $lasttstamp{day}, "12", "00", "00"); $minval = $maxval if($maxval != -(~0 >> 1)); # only use the current range's maximum as the new minimum if a proper value was found Log3 ($name, 5, "$name - Output delta-d -> TS: $tstamp{day}, LASTTS: $lasttstamp{day}, OUTTS: $out_tstamp, OUTVAL: $out_value, WRITEOUT: $writeout"); } - } + } else { $out_value = $sql_value; $out_tstamp = $sql_timestamp; - $writeout = 1; + $writeout = 1; } # Wenn Attr SuppressUndef gesetzt ist, dann ausfiltern aller undef-Werte $writeout = 0 if (!defined($sql_value) && AttrVal($hash->{NAME}, "suppressUndef", 0)); - + ###################### Ausgabe ########################### if($writeout) { if ($outf =~ m/(all)/) { # Timestamp: Device, Type, Event, Reading, Value, Unit $retval .= sprintf("%s: %s, %s, %s, %s, %s, %s\n", $out_tstamp, $sql_device, $type, $event, $sql_reading, $out_value, $unit); $retval .= $retvaldummy; - - } + + } elsif ($outf =~ m/(array)/) { push(@ReturnArray, {"tstamp" => $out_tstamp, "device" => $sql_device, "type" => $type, "event" => $event, "reading" => $sql_reading, "value" => $out_value, "unit" => $unit}); - } + } else { # generating plots $out_tstamp =~ s/\ /_/g; # needed by generating plots $retval .= "$out_tstamp $out_value\n"; $retval .= $retvaldummy; } - } + } if(Scalar::Util::looks_like_number($sql_value)) { # nur setzen wenn numerisch @@ -3976,28 +3981,28 @@ sub DbLog_Get { } } $maxval = $sql_value; - } + } else { if($firstd[$i] eq "undef") { $firstv[$i] = $sql_value; $firstd[$i] = $sql_timestamp; } - + if($sql_value < $min[$i]) { $min[$i] = $sql_value; $mind[$i] = $sql_timestamp; } - + if($sql_value > $max[$i]) { $max[$i] = $sql_value; $maxd[$i] = $sql_timestamp; } - + $sum[$i] += $sql_value; $minval = 
$sql_value if($sql_value < $minval); $maxval = $sql_value if($sql_value > $maxval); } - } + } else { $min[$i] = 0; $max[$i] = 0; @@ -4005,43 +4010,43 @@ sub DbLog_Get { $minval = 0; $maxval = 0; } - + if(!$deltacalc) { $cnt[$i]++; $lastv[$i] = $sql_value; - } + } else { $lastv[$i] = $out_value if($out_value); } - + $lastd[$i] = $sql_timestamp; } - } - ##### while fetchrow Ende ##### + } + ##### while fetchrow Ende ##### Log3 ($name, 4, "$name - PID: $$, rows count: $rv"); - + ######## den letzten Abschlusssatz rausschreiben ########## - + if($readings[$i]->[3] && ($readings[$i]->[3] eq "delta-h" || $readings[$i]->[3] eq "delta-d")) { if($lastd[$i] eq "undef") { $out_value = "0"; $out_tstamp = DbLog_implode_datetime($from_datetime{year}, $from_datetime{month}, $from_datetime{day}, $from_datetime{hour}, "30", "00") if($readings[$i]->[3] eq "delta-h"); $out_tstamp = DbLog_implode_datetime($from_datetime{year}, $from_datetime{month}, $from_datetime{day}, "12", "00", "00") if($readings[$i]->[3] eq "delta-d"); - } + } else { %lasttstamp = DbLog_explode_datetime($lastd[$i], ()); - + $out_value = ($minval != (~0 >> 1) && $maxval != -(~0 >> 1)) ? 
sprintf("%g", $maxval - $minval) : 0; # if there was no previous reading in the selected time range, produce a null delta - + $out_tstamp = DbLog_implode_datetime($lasttstamp{year}, $lasttstamp{month}, $lasttstamp{day}, $lasttstamp{hour}, "30", "00") if($readings[$i]->[3] eq "delta-h"); $out_tstamp = DbLog_implode_datetime($lasttstamp{year}, $lasttstamp{month}, $lasttstamp{day}, "12", "00", "00") if($readings[$i]->[3] eq "delta-d"); - } + } $sum[$i] += $out_value; $cnt[$i]++; - + if($outf =~ m/(all)/) { $retval .= sprintf("%s: %s %s %s %s %s %s\n", $out_tstamp, $sql_device, $type, $event, $sql_reading, $out_value, $unit); - } + } elsif ($outf =~ m/(array)/) { push(@ReturnArray, {"tstamp" => $out_tstamp, "device" => $sql_device, "type" => $type, "event" => $event, "reading" => $sql_reading, "value" => $out_value, "unit" => $unit}); } @@ -4049,10 +4054,10 @@ sub DbLog_Get { $out_tstamp =~ s/\ /_/g; #needed by generating plots $retval .= "$out_tstamp $out_value\n"; } - + Log3 ($name, 5, "$name - Output last DS -> OUTTS: $out_tstamp, OUTVAL: $out_value, WRITEOUT: implicit "); } - + # Datentrenner setzen $retval .= "#$readings[$i]->[0]"; $retval .= ":"; @@ -4064,7 +4069,7 @@ sub DbLog_Get { $retval .= ":"; $retval .= "$readings[$i]->[4]" if($readings[$i]->[4]); $retval .= "\n"; - + } # Ende for @readings-Schleife über alle Readinggs im get # Ueberfuehren der gesammelten Werte in die globale Variable %data @@ -4088,16 +4093,16 @@ sub DbLog_Get { # cleanup (plotfork) connection # $dbh->disconnect() if( $hash->{PID} != $$ ); - + $dbh->disconnect() if($nh || $hash->{PID} != $$); if($internal) { $internal_data = \$retval; return undef; - } + } elsif($outf =~ m/(array)/) { return @ReturnArray; - } + } else { $retval = Encode::encode_utf8($retval) if($utf8); # Log3 $name, 5, "DbLog $name -> Result of get:\n$retval"; @@ -4118,16 +4123,16 @@ sub DbLog_configcheck { my $dbname = (split(/;|=/, $dbconn))[1]; my $history = $hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; - + 
my ($check, $rec,%dbconfig); - + ### Version check - ####################################################################### + ####################################################################### my $pv = sprintf("%vd",$^V); # Perl Version my $dbi = $DBI::VERSION; # DBI Version my %drivers = DBI->installed_drivers(); my $dv = ""; - + if($dbmodel =~ /MYSQL/xi) { for (keys %drivers) { $dv = $_ if($_ =~ /mysql|mariadb/x); @@ -4136,54 +4141,54 @@ sub DbLog_configcheck { my $dbd = ($dbmodel =~ /POSTGRESQL/xi) ? "Pg: ".$DBD::Pg::VERSION: # DBD Version ($dbmodel =~ /MYSQL/xi && $dv) ? "$dv: ".$DBD::mysql::VERSION: ($dbmodel =~ /SQLITE/xi) ? "SQLite: ".$DBD::SQLite::VERSION:"Undefined"; - + my $dbdhint = ""; - my $dbdupd = 0; - + my $dbdupd = 0; + if($dbmodel =~ /MYSQL/xi && $dv) { # check DBD Mindest- und empfohlene Version my $dbdver = $DBD::mysql::VERSION * 1; # String to Zahl Konversion if($dbdver < 4.032) { $dbdhint = "Caution: Your DBD version doesn't support UTF8. "; $dbdupd = 1; - } + } elsif ($dbdver < 4.042) { - $dbdhint = "Caution: Full UTF-8 support exists from DBD version 4.032, but installing DBD version 4.042 is highly suggested. "; + $dbdhint = "Caution: Full UTF-8 support exists from DBD version 4.032, but installing DBD version 4.042 is highly suggested. "; $dbdupd = 1; - } + } else { $dbdhint = "Your DBD version fulfills UTF8 support, no need to update DBD."; } } - + my ($errcm,$supd,$uptb) = DbLog_checkModVer($name); # DbLog Version - + $check = ""; $check .= "Result of version check

"; $check .= "Used Perl version: $pv
"; $check .= "Used DBI (Database independent interface) version: $dbi
"; $check .= "Used DBD (Database driver) version $dbd
"; - + if($errcm) { $check .= "Recommendation: ERROR - $errcm. $dbdhint

"; } - + if($supd) { $check .= "Used DbLog version: $hash->{HELPER}{VERSION}.
$uptb
"; $check .= "Recommendation: You should update FHEM to get the recent DbLog version from repository ! $dbdhint

"; - } + } else { $check .= "Used DbLog version: $hash->{HELPER}{VERSION}.
$uptb
"; - $check .= "Recommendation: No update of DbLog is needed. $dbdhint

"; + $check .= "Recommendation: No update of DbLog is needed. $dbdhint

"; } - + ### Configuration read check ####################################################################### $check .= "Result of configuration read check

"; my $st = configDBUsed() ? "configDB (don't forget upload configuration file if changed. Use \"configdb filelist\" and look for your configuration file.)" : "file"; $check .= "Connection parameter store type: $st
"; - + my ($err, @config) = FileRead($hash->{CONFIGURATION}); - + if (!$err) { eval join("\n", @config); $rec = "parameter: "; @@ -4193,65 +4198,65 @@ sub DbLog_configcheck { $rec .= "User -> ".$dbconfig{user}.", " if (defined $dbconfig{user}); $rec .= "Password -> could not read " if (!defined $dbconfig{password}); $rec .= "Password -> read o.k. " if (defined $dbconfig{password}); - } + } else { $rec = $err; } $check .= "Connection $rec

"; - + ### Connection und Encoding check ####################################################################### my (@ce,@se); my ($chutf8mod,$chutf8dat); - + if($dbmodel =~ /MYSQL/) { @ce = DbLog_sqlget($hash,"SHOW VARIABLES LIKE 'character_set_connection'"); $chutf8mod = @ce ? uc($ce[1]) : "no result"; @se = DbLog_sqlget($hash,"SHOW VARIABLES LIKE 'character_set_database'"); $chutf8dat = @se ? uc($se[1]) : "no result"; - + if($chutf8mod eq $chutf8dat) { $rec = "settings o.k."; - } + } else { $rec = "Both encodings should be identical. You can adjust the usage of UTF8 connection by setting the UTF8 parameter in file '$hash->{CONFIGURATION}' to the right value. "; } if(uc($chutf8mod) ne "UTF8" && uc($chutf8dat) ne "UTF8") { $dbdhint = ""; - } + } else { $dbdhint .= " If you want use UTF8 database option, you must update DBD (Database driver) to at least version 4.032. " if($dbdupd); } - + } if($dbmodel =~ /POSTGRESQL/) { @ce = DbLog_sqlget($hash,"SHOW CLIENT_ENCODING"); $chutf8mod = @ce ? uc($ce[0]) : "no result"; @se = DbLog_sqlget($hash,"select character_set_name from information_schema.character_sets"); $chutf8dat = @se ? uc($se[0]) : "no result"; - + if($chutf8mod eq $chutf8dat) { $rec = "settings o.k."; - } + } else { $rec = "This is only an information. PostgreSQL supports automatic character set conversion between server and client for certain character set combinations. The conversion information is stored in the pg_conversion system catalog. PostgreSQL comes with some predefined conversions."; } - } + } if($dbmodel =~ /SQLITE/) { @ce = DbLog_sqlget($hash,"PRAGMA encoding"); $chutf8dat = @ce ? uc($ce[0]) : "no result"; @se = DbLog_sqlget($hash,"PRAGMA table_info($history)"); $rec = "This is only an information about text encoding used by the main database."; - } - + } + $check .= "Result of connection check

"; - + if(@ce && @se) { $check .= "Connection to database $dbname successfully done.
"; $check .= "Recommendation: settings o.k.

"; } - + if(!@ce || !@se) { $check .= "Connection to database was not successful.
"; $check .= "Recommendation: Plese check logfile for further information.

"; @@ -4262,27 +4267,27 @@ sub DbLog_configcheck { $check .= "Encoding used by Client (connection): $chutf8mod
" if($dbmodel !~ /SQLITE/); $check .= "Encoding used by DB $dbname: $chutf8dat
"; $check .= "Recommendation: $rec $dbdhint

"; - + ### Check Betriebsmodus ####################################################################### my $mode = $hash->{MODE}; my $bi = AttrVal($name, "bulkInsert", 0); my $sfx = AttrVal("global", "language", "EN"); $sfx = ($sfx eq "EN" ? "" : "_$sfx"); - + $check .= "Result of logmode check

"; $check .= "Logmode of DbLog-device $name is: $mode
"; if($mode =~ /asynchronous/) { my $max = AttrVal("global", "blockingCallMax", 0); - + if(!$max || $max >= 6) { $rec = "settings o.k."; - } + } else { $rec = "WARNING - you are running asynchronous mode that is recommended, but the value of global device attribute \"blockingCallMax\" is set quite small.
"; - $rec .= "This may cause problems in operation. It is recommended to increase the global blockingCallMax attribute."; - } - } + $rec .= "This may cause problems in operation. It is recommended to increase the global blockingCallMax attribute."; + } + } else { $rec = "Switch $name to the asynchronous logmode by setting the 'asyncMode' attribute. The advantage of this mode is to log events non-blocking.
"; $rec .= "There are attributes 'syncInterval' and 'cacheLimit' relevant for this working mode.
"; @@ -4296,30 +4301,30 @@ sub DbLog_configcheck { $check .= "Insert mode of DbLog-device $name is: $bi
"; $rec = "Setting attribute \"bulkInsert\" to \"1\" may result a higher write performance in most cases. "; $rec .= "Feel free to try this mode."; - } + } else { $bi = "Bulk"; - $check .= "Insert mode of DbLog-device $name is: $bi
"; - $rec = "settings o.k."; - } + $check .= "Insert mode of DbLog-device $name is: $bi
"; + $rec = "settings o.k."; + } $check .= "Recommendation: $rec

"; - + ### Check Plot Erstellungsmodus ####################################################################### $check .= "Result of plot generation method check

"; my @webdvs = devspec2array("TYPE=FHEMWEB:FILTER=STATE=Initialized"); my ($forks,$emb) = (1,1); my $wall = ""; - + for my $web (@webdvs) { my $pf = AttrVal($web,"plotfork",0); my $pe = AttrVal($web,"plotEmbed",0); $forks = 0 if(!$pf); $emb = 0 if($pe =~ /[01]/); - + if(!$pf || $pe =~ /[01]/) { $wall .= "".$web.": plotfork=".$pf." / plotEmbed=".$pe."
"; - } + } else { $wall .= $web.": plotfork=".$pf." / plotEmbed=".$pe."
"; } @@ -4330,20 +4335,20 @@ sub DbLog_configcheck { $rec = "You should set attribute \"plotfork = 1\" and \"plotEmbed = 2\" in relevant devices. ". "If these attributes are not set, blocking situations may occure when creating plots. ". "Note: Your system must have sufficient memory to handle parallel running Perl processes. See also global attribute \"blockingCallMax\".
" - } + } else { $check .= $wall; $rec = "settings o.k."; - } + } $check .= "
"; - $check .= "Recommendation: $rec

"; - + $check .= "Recommendation: $rec

"; + ### Check Spaltenbreite history ####################################################################### my (@sr_dev,@sr_typ,@sr_evt,@sr_rdg,@sr_val,@sr_unt); my ($cdat_dev,$cdat_typ,$cdat_evt,$cdat_rdg,$cdat_val,$cdat_unt); my ($cmod_dev,$cmod_typ,$cmod_evt,$cmod_rdg,$cmod_val,$cmod_unt); - + if($dbmodel =~ /MYSQL/) { @sr_dev = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='DEVICE'"); @sr_typ = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='TYPE'"); @@ -4362,7 +4367,7 @@ sub DbLog_configcheck { @sr_rdg = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$h' and table_schema='$sch' and column_name='reading'"); @sr_val = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$h' and table_schema='$sch' and column_name='value'"); @sr_unt = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$h' and table_schema='$sch' and column_name='unit'"); - } + } else { @sr_dev = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$h' and column_name='device'"); @sr_typ = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$h' and column_name='type'"); @@ -4370,7 +4375,7 @@ sub DbLog_configcheck { @sr_rdg = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$h' and column_name='reading'"); @sr_val = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$h' and column_name='value'"); @sr_unt = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$h' and column_name='unit'"); - + } } if($dbmodel =~ /SQLITE/) { @@ -4384,9 +4389,9 @@ sub DbLog_configcheck { ($cdat_val) = 
$cdat_val =~ /VALUE.varchar\(([\d]+)\)/x; ($cdat_unt) = $cdat_unt =~ /UNIT.varchar\(([\d]+)\)/x; } - if ($dbmodel !~ /SQLITE/) { + if ($dbmodel !~ /SQLITE/) { $cdat_dev = @sr_dev ? ($sr_dev[1]) : "no result"; - $cdat_dev =~ tr/varchar\(|\)//d if($cdat_dev ne "no result"); + $cdat_dev =~ tr/varchar\(|\)//d if($cdat_dev ne "no result"); $cdat_typ = @sr_typ ? ($sr_typ[1]) : "no result"; $cdat_typ =~ tr/varchar\(|\)//d if($cdat_typ ne "no result"); $cdat_evt = @sr_evt ? ($sr_evt[1]) : "no result"; @@ -4404,10 +4409,10 @@ sub DbLog_configcheck { $cmod_rdg = $hash->{HELPER}{READINGCOL}; $cmod_val = $hash->{HELPER}{VALUECOL}; $cmod_unt = $hash->{HELPER}{UNITCOL}; - + if($cdat_dev >= $cmod_dev && $cdat_typ >= $cmod_typ && $cdat_evt >= $cmod_evt && $cdat_rdg >= $cmod_rdg && $cdat_val >= $cmod_val && $cdat_unt >= $cmod_unt) { $rec = "settings o.k."; - } + } else { if ($dbmodel !~ /SQLITE/) { $rec = "The relation between column width in table $history and the field width used in device $name don't meet the requirements. "; @@ -4422,14 +4427,14 @@ sub DbLog_configcheck { $rec .= "You can change the column width in database by a statement like 'alter table $history modify VALUE varchar(128);' (example for changing field 'VALUE'). "; $rec .= "You can do it for example by executing 'sqlCmd' in DbRep or in a SQL-Editor of your choice. (switch $name to asynchron mode for non-blocking).
"; $rec .= "Alternatively the field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; - } + } else { $rec = "WARNING - The relation between column width in table $history and the field width used by device $name should be equal but it differs."; $rec .= "The field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; $rec .= "Because you use SQLite this is only a warning. Normally the database can handle these differences. "; } } - + $check .= "Result of table '$history' check

"; $check .= "Column width set in DB $history: 'DEVICE' = $cdat_dev, 'TYPE' = $cdat_typ, 'EVENT' = $cdat_evt, 'READING' = $cdat_rdg, 'VALUE' = $cdat_val, 'UNIT' = $cdat_unt
"; $check .= "Column width used by $name: 'DEVICE' = $cmod_dev, 'TYPE' = $cmod_typ, 'EVENT' = $cmod_evt, 'READING' = $cmod_rdg, 'VALUE' = $cmod_val, 'UNIT' = $cmod_unt
"; @@ -4445,7 +4450,7 @@ sub DbLog_configcheck { @sr_val = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='VALUE'"); @sr_unt = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='UNIT'"); } - + if($dbmodel =~ /POSTGRESQL/) { my $sch = AttrVal($name, "dbSchema", ""); my $c = "current"; @@ -4456,7 +4461,7 @@ sub DbLog_configcheck { @sr_rdg = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$c' and table_schema='$sch' and column_name='reading'"); @sr_val = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$c' and table_schema='$sch' and column_name='value'"); @sr_unt = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$c' and table_schema='$sch' and column_name='unit'"); - } + } else { @sr_dev = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$c' and column_name='device'"); @sr_typ = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$c' and column_name='type'"); @@ -4464,7 +4469,7 @@ sub DbLog_configcheck { @sr_rdg = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$c' and column_name='reading'"); @sr_val = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$c' and column_name='value'"); @sr_unt = DbLog_sqlget($hash,"select column_name,character_maximum_length from information_schema.columns where table_name='$c' and column_name='unit'"); - + } } if($dbmodel =~ /SQLITE/) { @@ -4478,19 +4483,19 @@ sub DbLog_configcheck { ($cdat_val) = $cdat_val =~ /VALUE.varchar\(([\d]+)\)/x; ($cdat_unt) = $cdat_unt =~ /UNIT.varchar\(([\d]+)\)/x; } - if ($dbmodel !~ /SQLITE/) { + if ($dbmodel !~ /SQLITE/) { $cdat_dev = 
@sr_dev ? ($sr_dev[1]) : "no result"; - $cdat_dev =~ tr/varchar\(|\)//d if($cdat_dev ne "no result"); + $cdat_dev =~ tr/varchar\(|\)//d if($cdat_dev ne "no result"); $cdat_typ = @sr_typ ? ($sr_typ[1]) : "no result"; - $cdat_typ =~ tr/varchar\(|\)//d if($cdat_typ ne "no result"); + $cdat_typ =~ tr/varchar\(|\)//d if($cdat_typ ne "no result"); $cdat_evt = @sr_evt ? ($sr_evt[1]) : "no result"; - $cdat_evt =~ tr/varchar\(|\)//d if($cdat_evt ne "no result"); + $cdat_evt =~ tr/varchar\(|\)//d if($cdat_evt ne "no result"); $cdat_rdg = @sr_rdg ? ($sr_rdg[1]) : "no result"; - $cdat_rdg =~ tr/varchar\(|\)//d if($cdat_rdg ne "no result"); + $cdat_rdg =~ tr/varchar\(|\)//d if($cdat_rdg ne "no result"); $cdat_val = @sr_val ? ($sr_val[1]) : "no result"; - $cdat_val =~ tr/varchar\(|\)//d if($cdat_val ne "no result"); + $cdat_val =~ tr/varchar\(|\)//d if($cdat_val ne "no result"); $cdat_unt = @sr_unt ? ($sr_unt[1]) : "no result"; - $cdat_unt =~ tr/varchar\(|\)//d if($cdat_unt ne "no result"); + $cdat_unt =~ tr/varchar\(|\)//d if($cdat_unt ne "no result"); } $cmod_dev = $hash->{HELPER}{DEVICECOL}; $cmod_typ = $hash->{HELPER}{TYPECOL}; @@ -4498,10 +4503,10 @@ sub DbLog_configcheck { $cmod_rdg = $hash->{HELPER}{READINGCOL}; $cmod_val = $hash->{HELPER}{VALUECOL}; $cmod_unt = $hash->{HELPER}{UNITCOL}; - + if($cdat_dev >= $cmod_dev && $cdat_typ >= $cmod_typ && $cdat_evt >= $cmod_evt && $cdat_rdg >= $cmod_rdg && $cdat_val >= $cmod_val && $cdat_unt >= $cmod_unt) { $rec = "settings o.k."; - } + } else { if ($dbmodel !~ /SQLITE/) { $rec = "The relation between column width in table $current and the field width used in device $name don't meet the requirements. "; @@ -4516,26 +4521,26 @@ sub DbLog_configcheck { $rec .= "You can change the column width in database by a statement like 'alter table $current modify VALUE varchar(128);' (example for changing field 'VALUE'). "; $rec .= "You can do it for example by executing 'sqlCmd' in DbRep or in a SQL-Editor of your choice. 
(switch $name to asynchron mode for non-blocking).
"; $rec .= "Alternatively the field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; - } + } else { $rec = "WARNING - The relation between column width in table $current and the field width used by device $name should be equal but it differs. "; $rec .= "The field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; $rec .= "Because you use SQLite this is only a warning. Normally the database can handle these differences. "; } } - + $check .= "Result of table '$current' check

"; $check .= "Column width set in DB $current: 'DEVICE' = $cdat_dev, 'TYPE' = $cdat_typ, 'EVENT' = $cdat_evt, 'READING' = $cdat_rdg, 'VALUE' = $cdat_val, 'UNIT' = $cdat_unt
"; $check .= "Column width used by $name: 'DEVICE' = $cmod_dev, 'TYPE' = $cmod_typ, 'EVENT' = $cmod_evt, 'READING' = $cmod_rdg, 'VALUE' = $cmod_val, 'UNIT' = $cmod_unt
"; $check .= "Recommendation: $rec

"; #} - + ### Check Vorhandensein Search_Idx mit den empfohlenen Spalten ####################################################################### my (@six,@six_dev,@six_rdg,@six_tsp); my ($idef,$idef_dev,$idef_rdg,$idef_tsp); $check .= "Result of check 'Search_Idx' availability

"; - + if($dbmodel =~ /MYSQL/) { @six = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Search_Idx'"); if (!@six) { @@ -4544,17 +4549,17 @@ sub DbLog_configcheck { $rec .= "Depending on your database size this command may running a long time.
"; $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Search_Idx' as well !
"; - } + } else { @six_dev = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Search_Idx' and Column_name='DEVICE'"); @six_rdg = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Search_Idx' and Column_name='READING'"); @six_tsp = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Search_Idx' and Column_name='TIMESTAMP'"); - + if (@six_dev && @six_rdg && @six_tsp) { $check .= "Index 'Search_Idx' exists and contains recommended fields 'DEVICE', 'TIMESTAMP', 'READING'.
"; $rec = "settings o.k."; - } - else { + } + else { $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'DEVICE'.
" if (!@six_dev); $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'READING'.
" if (!@six_rdg); $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!@six_tsp); @@ -4567,25 +4572,25 @@ sub DbLog_configcheck { } if($dbmodel =~ /POSTGRESQL/) { @six = DbLog_sqlget($hash,"SELECT * FROM pg_indexes WHERE tablename='$history' and indexname ='Search_Idx'"); - + if (!@six) { $check .= "The index 'Search_Idx' is missing.
"; $rec = "You can create the index by executing statement 'CREATE INDEX \"Search_Idx\" ON $history USING btree (device, reading, \"timestamp\")'
"; $rec .= "Depending on your database size this command may running a long time.
"; $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Search_Idx' as well !
"; - } + } else { $idef = $six[4]; $idef_dev = 1 if($idef =~ /device/); $idef_rdg = 1 if($idef =~ /reading/); $idef_tsp = 1 if($idef =~ /timestamp/); - + if ($idef_dev && $idef_rdg && $idef_tsp) { $check .= "Index 'Search_Idx' exists and contains recommended fields 'DEVICE', 'READING', 'TIMESTAMP'.
"; $rec = "settings o.k."; - } - else { + } + else { $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'DEVICE'.
" if (!$idef_dev); $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$idef_rdg); $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$idef_tsp); @@ -4598,25 +4603,25 @@ sub DbLog_configcheck { } if($dbmodel =~ /SQLITE/) { @six = DbLog_sqlget($hash,"SELECT name,sql FROM sqlite_master WHERE type='index' AND name='Search_Idx'"); - + if (!$six[0]) { $check .= "The index 'Search_Idx' is missing.
"; $rec = "You can create the index by executing statement 'CREATE INDEX Search_Idx ON `$history` (DEVICE, READING, TIMESTAMP)'
"; $rec .= "Depending on your database size this command may running a long time.
"; $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Search_Idx' as well !
"; - } + } else { $idef = $six[1]; $idef_dev = 1 if(lc($idef) =~ /device/); $idef_rdg = 1 if(lc($idef) =~ /reading/); $idef_tsp = 1 if(lc($idef) =~ /timestamp/); - + if ($idef_dev && $idef_rdg && $idef_tsp) { $check .= "Index 'Search_Idx' exists and contains recommended fields 'DEVICE', 'READING', 'TIMESTAMP'.
"; $rec = "settings o.k."; - } - else { + } + else { $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'DEVICE'.
" if (!$idef_dev); $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$idef_rdg); $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$idef_tsp); @@ -4627,16 +4632,16 @@ sub DbLog_configcheck { } } } - + $check .= "Recommendation: $rec

"; - + ### Check Index Report_Idx für DbRep-Device falls DbRep verwendet wird ####################################################################### my (@dix,@dix_rdg,@dix_tsp,$irep_rdg,$irep_tsp,$irep); my $isused = 0; my @repdvs = devspec2array("TYPE=DbRep"); $check .= "Result of check 'Report_Idx' availability for DbRep-devices

"; - + for my $dbrp (@repdvs) { if(!$defs{$dbrp}) { Log3 ($name, 2, "DbLog $name -> Device '$dbrp' found by configCheck doesn't exist !"); @@ -4645,30 +4650,30 @@ sub DbLog_configcheck { if ($defs{$dbrp}->{DEF} eq $name) { # DbRep Device verwendet aktuelles DbLog-Device Log3 ($name, 5, "DbLog $name -> DbRep-Device '$dbrp' uses $name."); - $isused = 1; + $isused = 1; } } if ($isused) { if($dbmodel =~ /MYSQL/) { @dix = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Report_Idx'"); - + if (!@dix) { $check .= "At least one DbRep-device assigned to $name is used, but the recommended index 'Report_Idx' is missing.
"; $rec = "You can create the index by executing statement 'CREATE INDEX Report_Idx ON `$history` (TIMESTAMP,READING) USING BTREE;'
"; $rec .= "Depending on your database size this command may running a long time.
"; $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Report_Idx' as well !
"; - } + } else { @dix_rdg = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Report_Idx' and Column_name='READING'"); @dix_tsp = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Report_Idx' and Column_name='TIMESTAMP'"); - + if (@dix_rdg && @dix_tsp) { $check .= "At least one DbRep-device assigned to $name is used. "; $check .= "Index 'Report_Idx' exists and contains recommended fields 'TIMESTAMP', 'READING'.
"; $rec = "settings o.k."; - } - else { + } + else { $check .= "You use at least one DbRep-device assigned to $name. "; $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'READING'.
" if (!@dix_rdg); $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!@dix_tsp); @@ -4681,24 +4686,24 @@ sub DbLog_configcheck { } if($dbmodel =~ /POSTGRESQL/) { @dix = DbLog_sqlget($hash,"SELECT * FROM pg_indexes WHERE tablename='$history' and indexname ='Report_Idx'"); - + if (!@dix) { $check .= "You use at least one DbRep-device assigned to $name, but the recommended index 'Report_Idx' is missing.
"; $rec = "You can create the index by executing statement 'CREATE INDEX \"Report_Idx\" ON $history USING btree (\"timestamp\", reading)'
"; $rec .= "Depending on your database size this command may running a long time.
"; $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Report_Idx' as well !
"; - } + } else { $irep = $dix[4]; $irep_rdg = 1 if($irep =~ /reading/); $irep_tsp = 1 if($irep =~ /timestamp/); - + if ($irep_rdg && $irep_tsp) { $check .= "Index 'Report_Idx' exists and contains recommended fields 'TIMESTAMP', 'READING'.
"; $rec = "settings o.k."; - } - else { + } + else { $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$irep_rdg); $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$irep_tsp); $rec = "The index should contain the fields 'TIMESTAMP', 'READING'. "; @@ -4710,23 +4715,23 @@ sub DbLog_configcheck { } if($dbmodel =~ /SQLITE/) { @dix = DbLog_sqlget($hash,"SELECT name,sql FROM sqlite_master WHERE type='index' AND name='Report_Idx'"); - + if (!$dix[0]) { $check .= "The index 'Report_Idx' is missing.
"; $rec = "You can create the index by executing statement 'CREATE INDEX Report_Idx ON `$history` (TIMESTAMP,READING)'
"; $rec .= "Depending on your database size this command may running a long time.
"; $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Search_Idx' as well !
"; - } + } else { $irep = $dix[1]; $irep_rdg = 1 if(lc($irep) =~ /reading/); $irep_tsp = 1 if(lc($irep) =~ /timestamp/); - + if ($irep_rdg && $irep_tsp) { $check .= "Index 'Report_Idx' exists and contains recommended fields 'TIMESTAMP', 'READING'.
"; $rec = "settings o.k."; - } + } else { $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$irep_rdg); $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$irep_tsp); @@ -4737,14 +4742,14 @@ sub DbLog_configcheck { } } } - - } + + } else { $check .= "No DbRep-device assigned to $name is used. Hence an index for DbRep isn't needed.
"; $rec = "settings o.k."; } $check .= "Recommendation: $rec

"; - + $check .= ""; return $check; @@ -4761,18 +4766,18 @@ sub DbLog_checkModVer { Log3 $name, 1, "DbLog $name -> configCheck: Cannot parse $src, probably not a valid http control file"; return ("check of new DbLog version not possible, see logfile."); } - + my $basePath = $1; my $ctrlFileName = $2; my ($remCtrlFile, $err) = DbLog_updGetUrl($name,$src); return ("check of new DbLog version not possible: $err") if($err); - + if(!$remCtrlFile) { Log3 $name, 1, "DbLog $name -> configCheck: No valid remote control file"; return ("check of new DbLog version not possible, see logfile."); } - + my @remList = split(/\R/, $remCtrlFile); Log3 $name, 4, "DbLog $name -> configCheck: Got remote $ctrlFileName with ".int(@remList)." entries."; @@ -4783,12 +4788,12 @@ sub DbLog_checkModVer { @locList = map { $_ =~ s/[\r\n]//; $_ } ; close(FD); Log3 $name, 4, "DbLog $name -> configCheck: Got local $ctrlFileName with ".int(@locList)." entries."; - } + } else { Log3 $name, 1, "DbLog $name -> configCheck: can't open $root/FHEM/$ctrlFileName: $!"; - return ("check of new DbLog version not possible, see logfile."); + return ("check of new DbLog version not possible, see logfile."); } - + my %lh; foreach my $l (@locList) { my @l = split(" ", $l, 4); @@ -4797,9 +4802,9 @@ sub DbLog_checkModVer { $lh{$l[3]}{LEN} = $l[2]; Log3 $name, 4, "DbLog $name -> configCheck: local version from last update - creation time: ".$lh{$l[3]}{TS}." - bytes: ".$lh{$l[3]}{LEN}; } - + my $noSzCheck = AttrVal("global", "updateNoFileCheck", configDBUsed()); - + for my $rem (@remList) { my @r = split(" ", $rem, 4); next if($r[0] ne "UPD" || $r[3] !~ /93_DbLog/); @@ -4819,20 +4824,20 @@ sub DbLog_checkModVer { } last; } - + return ("",0,"Your local DbLog module is up to date."); } ################################### sub DbLog_updGetUrl { my ($name,$url) = @_; - + my %upd_connecthash; - + $url =~ s/%/%25/g; $upd_connecthash{url} = $url; $upd_connecthash{keepalive} = ($url =~ m/localUpdate/ ? 
0 : 1); # Forum #49798 - + my ($err, $data) = HttpUtils_BlockingGet(\%upd_connecthash); if($err) { Log3 $name, 1, "DbLog $name -> configCheck: ERROR while connecting to fhem.de: $err"; @@ -4843,7 +4848,7 @@ sub DbLog_updGetUrl { $err = 1; return ("",$err); } - + return ($data,""); } @@ -4853,15 +4858,15 @@ return ($data,""); sub DbLog_sqlget { my ($hash,$sql) = @_; my $name = $hash->{NAME}; - + my ($dbh,$sth,@sr); - + Log3 ($name, 4, "DbLog $name - Executing SQL: $sql"); - + $dbh = DbLog_ConnectNewDBH($hash); return if(!$dbh); - - eval { $sth = $dbh->prepare("$sql"); + + eval { $sth = $dbh->prepare("$sql"); $sth->execute; }; if($@) { @@ -4869,15 +4874,15 @@ sub DbLog_sqlget { Log3 ($name, 2, "DbLog $name - $@"); return @sr; } - - @sr = $sth->fetchrow; - + + @sr = $sth->fetchrow; + $sth->finish; $dbh->disconnect; no warnings 'uninitialized'; Log3 ($name, 4, "DbLog $name - SQL result: @sr"); use warnings; - + return @sr; } @@ -4892,91 +4897,91 @@ sub DbLog_AddLog { my $async = AttrVal($name, "asyncMode", undef); my $value_fn = AttrVal( $name, "valueFn", "" ); my $ce = AttrVal($name, "cacheEvents", 0); - - my ($dev_type,$dev_name,$dev_reading,$read_val,$event,$ut); - my @row_array; + + my ($dev_type,$dev_name,$dev_reading,$read_val,$event,$ut); + my @row_array; my $ts; return if(IsDisabled($name) || !$hash->{HELPER}{COLSET} || $init_done != 1); - + # Funktion aus Attr valueFn validieren if( $value_fn =~ m/^\s*(\{.*\})\s*$/s ) { $value_fn = $1; - } + } else { $value_fn = ''; } - - my $now = gettimeofday(); + + my $now = gettimeofday(); my $rdspec = (split ":",$devrdspec)[-1]; my @dc = split(":",$devrdspec); pop @dc; my $devspec = join(':',@dc); my @exdvs = devspec2array($devspec); - + Log3 $name, 4, "DbLog $name -> Addlog known devices by devspec: @exdvs"; - + foreach (@exdvs) { $dev_name = $_; if(!$defs{$dev_name}) { Log3 $name, 2, "DbLog $name -> Device '$dev_name' used by addLog doesn't exist !"; next; } - + my $r = $defs{$dev_name}{READINGS}; my $DbLogExclude = 
AttrVal($dev_name, "DbLogExclude", undef); my $DbLogInclude = AttrVal($dev_name, "DbLogInclude", undef); my @exrds; my $found = 0; - - foreach my $rd (sort keys %{$r}) { # jedes Reading des Devices auswerten + + foreach my $rd (sort keys %{$r}) { # jedes Reading des Devices auswerten my $do = 1; $found = 1 if($rd =~ m/^$rdspec$/); # Reading gefunden if($DbLogExclude && !$nce) { my @v1 = split(/,/, $DbLogExclude); for (my $i=0; $i ausschließen vom addLog + if($rd =~ m,^$v2[0]$,) { # Reading matcht $DbLogExclude -> ausschließen vom addLog $do = 0; if($DbLogInclude) { my @v3 = split(/,/, $DbLogInclude); for (my $i=0; $i wieder in addLog einschließen + $do = 1 if($rd =~ m,^$v4[0]$,); # Reading matcht $DbLogInclude -> wieder in addLog einschließen } - } - Log3 $name, 2, "DbLog $name -> Device: \"$dev_name\", reading: \"$v2[0]\" excluded by attribute DbLogExclude from addLog !" if($do == 0 && $rd =~ m/^$rdspec$/); + } + Log3 $name, 2, "DbLog $name -> Device: \"$dev_name\", reading: \"$v2[0]\" excluded by attribute DbLogExclude from addLog !" if($do == 0 && $rd =~ m/^$rdspec$/); } } } next if(!$do); - push @exrds,$rd if($rd =~ m/^$rdspec$/); + push @exrds,$rd if($rd =~ m/^$rdspec$/); } - + Log3 $name, 4, "DbLog $name -> Readings extracted from Regex: @exrds"; if(!$found) { if(goodReadingName($rdspec) && defined($value)) { Log3 $name, 3, "DbLog $name -> addLog WARNING - Device: '$dev_name' -> Reading '$rdspec' not found - add it as new reading."; push @exrds,$rdspec; - } + } elsif (goodReadingName($rdspec) && !defined($value)) { Log3 $name, 2, "DbLog $name -> addLog WARNING - Device: '$dev_name' -> new Reading '$rdspec' has no value - can't add it !"; - } + } else { Log3 $name, 2, "DbLog $name -> addLog WARNING - Device: '$dev_name' -> Readingname '$rdspec' is no valid or regexp - can't add regexp as new reading !"; } } - - no warnings 'uninitialized'; + + no warnings 'uninitialized'; foreach (@exrds) { $dev_reading = $_; $read_val = $value ne "" ? 
$value : ReadingsVal($dev_name,$dev_reading,""); $dev_type = uc($defs{$dev_name}{TYPE}); - + # dummy-Event zusammenstellen - $event = $dev_reading.": ".$read_val; + $event = $dev_reading.": ".$read_val; # den zusammengestellten Event parsen lassen (evtl. Unit zuweisen) my @r = DbLog_ParseEvent($name,$dev_name, $dev_type, $event); @@ -4985,15 +4990,15 @@ sub DbLog_AddLog { $ut = $r[2]; if(!defined $dev_reading) {$dev_reading = "";} if(!defined $read_val) {$read_val = "";} - if(!defined $ut || $ut eq "") {$ut = AttrVal("$dev_name", "unit", "");} - + if(!defined $ut || $ut eq "") {$ut = AttrVal("$dev_name", "unit", "");} + $event = "addLog"; - + $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{TIME} = $now; $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{VALUE} = $read_val; - + $ts = TimeNow(); - + my $ctz = AttrVal($name, 'convertTimezone', 'none'); # convert time zone if($ctz ne 'none') { my $err; @@ -5004,20 +5009,20 @@ sub DbLog_AddLog { tzconv => $ctz, writelog => 0 }; - + ($err, $ts) = convertTimeZone ($params); - + if ($err) { Log3 ($name, 1, "DbLog $name - ERROR while converting time zone: $err - exit log loop !"); last; } } - - # Anwender spezifische Funktion anwenden + + # Anwender spezifische Funktion anwenden if($value_fn ne '') { my $lastt = $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{TIME}; # patch Forum:#111423 my $lastv = $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{VALUE}; - + my $TIMESTAMP = $ts; my $LASTTIMESTAMP = $lastt // 0; # patch Forum:#111423 my $DEVICE = $dev_name; @@ -5032,77 +5037,77 @@ sub DbLog_AddLog { eval $value_fn; Log3 $name, 2, "DbLog $name -> error valueFn: ".$@ if($@); - + if($IGNORE) { # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{TIME} = $lastt if($lastt); # patch Forum:#111423 - $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{VALUE} = $lastv if(defined $lastv); - next; + 
$defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{VALUE} = $lastv if(defined $lastv); + next; } - + my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($TIMESTAMP =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); eval { my $epoch_seconds_begin = timelocal($sec, $min, $hh, $dd, $mm-1, $yyyy-1900); }; if (!$@) { $ts = $TIMESTAMP; - } + } else { Log3 ($name, 2, "DbLog $name -> Parameter TIMESTAMP got from valueFn is invalid: $TIMESTAMP"); } - + $dev_name = $DEVICE if($DEVICE ne ''); $dev_type = $DEVICETYPE if($DEVICETYPE ne ''); $dev_reading = $READING if($READING ne ''); $read_val = $VALUE if(defined $VALUE); $ut = $UNIT if(defined $UNIT); } - + # Daten auf maximale Länge beschneiden ($dev_name,$dev_type,$event,$dev_reading,$read_val,$ut) = DbLog_cutCol($hash,$dev_name,$dev_type,$event,$dev_reading,$read_val,$ut); - + if(AttrVal($name, "useCharfilter",0)) { $dev_reading = DbLog_charfilter($dev_reading); $read_val = DbLog_charfilter($read_val); } - + my $row = ($ts."|".$dev_name."|".$dev_type."|".$event."|".$dev_reading."|".$read_val."|".$ut); Log3 $hash->{NAME}, 3, "DbLog $name -> addLog created - TS: $ts, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $dev_reading, Value: $read_val, Unit: $ut" if(!AttrVal($name, "suppressAddLogV3",0)); - + if($async) { # asynchoner non-blocking Mode # Cache & CacheIndex für Events zum asynchronen Schreiben in DB $data{DbLog}{$name}{cache}{index}++; my $index = $data{DbLog}{$name}{cache}{index}; $data{DbLog}{$name}{cache}{memcache}{$index} = $row; - - my $memcount = $data{DbLog}{$name}{cache}{memcache} ? - scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}) : + + my $memcount = $data{DbLog}{$name}{cache}{memcache} ? 
+ scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}) : 0; - + if($ce == 1) { - readingsSingleUpdate($hash, "CacheUsage", $memcount, 1); - } - else { - readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); + readingsSingleUpdate($hash, "CacheUsage", $memcount, 1); } - } + else { + readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); + } + } else { - # synchoner Mode + # synchoner Mode push(@row_array, $row); } } use warnings; } - - if(!$async) { + + if(!$async) { if(@row_array) { # synchoner Mode # return wenn "reopen" mit Ablaufzeit gestartet ist - return if($hash->{HELPER}{REOPEN_RUNS}); + return if($hash->{HELPER}{REOPEN_RUNS}); my $error = DbLog_Push($hash, 1, @row_array); my $state = $error ? $error : (IsDisabled($name)) ? "disabled" : "connected"; DbLog_setReadingstate ($hash, $state); - + Log3 $name, 5, "DbLog $name -> DbLog_Push Returncode: $error"; } } @@ -5118,24 +5123,24 @@ sub DbLog_addCacheLine { my ($hash,$i_timestamp,$i_dev,$i_type,$i_evt,$i_reading,$i_val,$i_unit) = @_; my $name = $hash->{NAME}; my $ce = AttrVal($name, "cacheEvents", 0); - my $value_fn = AttrVal( $name, "valueFn", "" ); - + my $value_fn = AttrVal( $name, "valueFn", "" ); + # Funktion aus Attr valueFn validieren if( $value_fn =~ m/^\s*(\{.*\})\s*$/s ) { $value_fn = $1; - } + } else { $value_fn = ''; } - + if($value_fn ne '') { my $lastt; my $lastv; - if($defs{$i_dev}) { + if($defs{$i_dev}) { $lastt = $defs{$i_dev}{Helper}{DBLOG}{$i_reading}{$hash->{NAME}}{TIME}; $lastv = $defs{$i_dev}{Helper}{DBLOG}{$i_reading}{$hash->{NAME}}{VALUE}; } - + my $TIMESTAMP = $i_timestamp; my $LASTTIMESTAMP = $lastt // 0; # patch Forum:#111423 my $DEVICE = $i_dev; @@ -5150,19 +5155,19 @@ sub DbLog_addCacheLine { eval $value_fn; Log3 $name, 2, "DbLog $name -> error valueFn: ".$@ if($@); - - if($IGNORE) { # kein add wenn $IGNORE=1 gesetzt + + if($IGNORE) { # kein add wenn $IGNORE=1 gesetzt $defs{$i_dev}{Helper}{DBLOG}{$i_reading}{$hash->{NAME}}{TIME} = $lastt if($defs{$i_dev} && $lastt); # patch 
Forum:#111423 - $defs{$i_dev}{Helper}{DBLOG}{$i_reading}{$hash->{NAME}}{VALUE} = $lastv if($defs{$i_dev} && defined $lastv); + $defs{$i_dev}{Helper}{DBLOG}{$i_reading}{$hash->{NAME}}{VALUE} = $lastv if($defs{$i_dev} && defined $lastv); Log3 $hash->{NAME}, 4, "DbLog $name -> Event ignored by valueFn - TS: $i_timestamp, Device: $i_dev, Type: $i_type, Event: $i_evt, Reading: $i_reading, Value: $i_val, Unit: $i_unit"; - next; + next; } - + my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($TIMESTAMP =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); eval { my $epoch_seconds_begin = timelocal($sec, $min, $hh, $dd, $mm-1, $yyyy-1900); }; if (!$@) { $i_timestamp = $TIMESTAMP; - } + } else { Log3 ($name, 2, "DbLog $name -> Parameter TIMESTAMP got from valueFn is invalid: $TIMESTAMP"); } @@ -5173,27 +5178,27 @@ sub DbLog_addCacheLine { $i_val = $VALUE if(defined $VALUE); $i_unit = $UNIT if(defined $UNIT); } - - no warnings 'uninitialized'; + + no warnings 'uninitialized'; # Daten auf maximale Länge beschneiden ($i_dev,$i_type,$i_evt,$i_reading,$i_val,$i_unit) = DbLog_cutCol($hash,$i_dev,$i_type,$i_evt,$i_reading,$i_val,$i_unit); - + my $row = ($i_timestamp."|".$i_dev."|".$i_type."|".$i_evt."|".$i_reading."|".$i_val."|".$i_unit); $row = DbLog_charfilter($row) if(AttrVal($name, "useCharfilter",0)); Log3 $hash->{NAME}, 3, "DbLog $name -> added by addCacheLine - TS: $i_timestamp, Device: $i_dev, Type: $i_type, Event: $i_evt, Reading: $i_reading, Value: $i_val, Unit: $i_unit"; use warnings; - + eval { # one transaction $data{DbLog}{$name}{cache}{index}++; my $index = $data{DbLog}{$name}{cache}{index}; $data{DbLog}{$name}{cache}{memcache}{$index} = $row; - + my $memcount = $data{DbLog}{$name}{cache}{memcache}?scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}):0; if($ce == 1) { - readingsSingleUpdate($hash, "CacheUsage", $memcount, 1); - } + readingsSingleUpdate($hash, "CacheUsage", $memcount, 1); + } else { - readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); + readingsSingleUpdate($hash, 
'CacheUsage', $memcount, 0); } }; @@ -5207,11 +5212,11 @@ return; ######################################################################################### sub DbLog_cutCol { my ($hash,$dn,$dt,$evt,$rd,$val,$unit) = @_; - my $name = $hash->{NAME}; + my $name = $hash->{NAME}; my $colevent = AttrVal($name, 'colEvent', undef); my $colreading = AttrVal($name, 'colReading', undef); my $colvalue = AttrVal($name, 'colValue', undef); - + if ($hash->{MODEL} ne 'SQLITE' || defined($colevent) || defined($colreading) || defined($colvalue) ) { $dn = substr($dn,0, $hash->{HELPER}{DEVICECOL}); $dt = substr($dt,0, $hash->{HELPER}{TYPECOL}); @@ -5228,7 +5233,7 @@ return ($dn,$dt,$evt,$rd,$val,$unit); # verwendet werden soll # # basic_ta:on - Autocommit Servereinstellung / Transaktion ein -# basic_ta:off - Autocommit Servereinstellung / Transaktion aus +# basic_ta:off - Autocommit Servereinstellung / Transaktion aus # ac:on_ta:on - Autocommit ein / Transaktion ein # ac:on_ta:off - Autocommit ein / Transaktion aus # ac:off_ta:on - Autocommit aus / Transaktion ein (AC aus impliziert TA ein) @@ -5236,26 +5241,26 @@ return ($dn,$dt,$evt,$rd,$val,$unit); # Autocommit: 0/1/2 = aus/ein/Servereinstellung # Transaktion: 0/1 = aus/ein ############################################################################### -sub DbLog_commitMode { +sub DbLog_commitMode { my $hash = shift; my $name = $hash->{NAME}; my $useac = 2; # default Servereinstellung my $useta = 1; # default Transaktion ein - + my $cm = AttrVal($name, "commitMode", "basic_ta:on"); my ($ac,$ta) = split("_",$cm); $useac = ($ac =~ /off/)?0:($ac =~ /on/)?1:2; $useta = 0 if($ta =~ /off/); - + return($useac,$useta); } ############################################################################### -# Zeichen von Feldevents filtern +# Zeichen von Feldevents filtern ############################################################################### -sub DbLog_charfilter { +sub DbLog_charfilter { my $txt = shift; - + my ($p,$a); # nur 
erwünschte Zeichen ASCII %d32-126 und Sonderzeichen @@ -5268,11 +5273,11 @@ sub DbLog_charfilter { $txt =~ s/Ü/Ue/g; $txt =~ s/€/EUR/g; $txt =~ s/\xb0/1degree1/g; - + $txt =~ tr/ A-Za-z0-9!"#$%&'()*+,-.\/:;<=>?@[\\]^_`{|}~//cd; - + $txt =~ s/1degree1/°/g; - + return($txt); } @@ -5287,7 +5292,7 @@ sub DbLog_reduceLog { my ($name,$startTime,$currentHour,$currentDay,$deletedCount,$updateCount,$sum,$rowCount,$excludeCount) = ($hash->{NAME},time(),99,0,0,0,0,0,0); my $dbh = DbLog_ConnectNewDBH($hash); return if(!$dbh); - + if ($a[-1] =~ /^EXCLUDE=(.+:.+)+/i) { ($filter) = $a[-1] =~ /^EXCLUDE=(.+)/i; @excludeRegex = split(',',$filter); @@ -5300,32 +5305,32 @@ sub DbLog_reduceLog { Log3($name, 3, "DbLog $name: reduceLog requested with DAYS=$a[2]" .(($average || $filter) ? ', ' : '').(($average) ? "$average" : '') .(($average && $filter) ? ", " : '').(($filter) ? uc((split('=',$a[-1]))[0]).'='.(split('=',$a[-1]))[1] : '')); - + my ($useac,$useta) = DbLog_commitMode($hash); my $ac = ($dbh->{AutoCommit}) ? "ON" : "OFF"; my $tm = ($useta) ? 
"ON" : "OFF"; - + Log3 $hash->{NAME}, 4, "DbLog $name -> AutoCommit mode: $ac, Transaction mode: $tm"; - + my ($od,$nd) = split(":",$a[2]); # $od - Tage älter als , $nd - Tage neuer als my ($ots,$nts); - - if ($hash->{MODEL} eq 'SQLITE') { + + if ($hash->{MODEL} eq 'SQLITE') { $ots = "datetime('now', '-$od days')"; $nts = "datetime('now', '-$nd days')" if($nd); - } - elsif ($hash->{MODEL} eq 'MYSQL') { - $ots = "DATE_SUB(CURDATE(),INTERVAL $od DAY)"; - $nts = "DATE_SUB(CURDATE(),INTERVAL $nd DAY)" if($nd); - } - elsif ($hash->{MODEL} eq 'POSTGRESQL') { - $ots = "NOW() - INTERVAL '$od' DAY"; - $nts = "NOW() - INTERVAL '$nd' DAY" if($nd); - } - else { - $ret = 'Unknown database type.'; } - + elsif ($hash->{MODEL} eq 'MYSQL') { + $ots = "DATE_SUB(CURDATE(),INTERVAL $od DAY)"; + $nts = "DATE_SUB(CURDATE(),INTERVAL $nd DAY)" if($nd); + } + elsif ($hash->{MODEL} eq 'POSTGRESQL') { + $ots = "NOW() - INTERVAL '$od' DAY"; + $nts = "NOW() - INTERVAL '$nd' DAY" if($nd); + } + else { + $ret = 'Unknown database type.'; + } + if ($ots) { my ($sth_del, $sth_upd, $sth_delD, $sth_updD, $sth_get); eval { $sth_del = $dbh->prepare_cached("DELETE FROM $history WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) AND (VALUE=?)"); @@ -5336,9 +5341,9 @@ sub DbLog_reduceLog { .($a[-1] =~ /^INCLUDE=(.+):(.+)$/i ? "DEVICE like '$1' AND READING like '$2' AND " : '') ."TIMESTAMP < $ots".($nts?" AND TIMESTAMP >= $nts ":" ")."ORDER BY TIMESTAMP ASC"); # '' was EVENT, no longer in use }; - + $sth_get->execute(); - + do { $row = $sth_get->fetchrow_arrayref || ['0000-00-00 00:00:00','D','','R','V']; # || execute last-day dummy $ret = 1; @@ -5356,23 +5361,23 @@ sub DbLog_reduceLog { $deletedCount += $c; Log3($name, 3, "DbLog $name: reduceLog deleting $c records of day: $processingDay"); $dbh->{RaiseError} = 1; - $dbh->{PrintError} = 0; + $dbh->{PrintError} = 0; eval {$dbh->begin_work() if($dbh->{AutoCommit});}; eval { my $i = 0; my $k = 1; - my $th = ($#dayRows <= 2000) ? 
100 : - ($#dayRows <= 30000) ? 1000 : + my $th = ($#dayRows <= 2000) ? 100 : + ($#dayRows <= 30000) ? 1000 : 10000; - + for my $delRow (@dayRows) { if($day != 00 || $delRow->[0] !~ /$lastHour/) { Log3($name, 5, "DbLog $name: DELETE FROM $history WHERE (DEVICE=$delRow->[1]) AND (READING=$delRow->[3]) AND (TIMESTAMP=$delRow->[0]) AND (VALUE=$delRow->[4])"); $sth_del->execute(($delRow->[1], $delRow->[3], $delRow->[0], $delRow->[4])); $i++; - + if($i == $th) { - my $prog = $k * $i; + my $prog = $k * $i; Log3($name, 3, "DbLog $name: reduceLog deletion progress of day: $processingDay is: $prog"); $i = 0; $k++; @@ -5380,29 +5385,29 @@ sub DbLog_reduceLog { } } }; - + if ($@) { Log3($hash->{NAME}, 3, "DbLog $name: reduceLog ! FAILED ! for day $processingDay"); eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; $ret = 0; - } + } else { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; } - $dbh->{RaiseError} = 0; + $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; } - + @dayRows = (); } - + if ($ret && defined($a[3]) && $a[3] =~ /average/i) { $dbh->{RaiseError} = 1; - $dbh->{PrintError} = 0; + $dbh->{PrintError} = 0; eval {$dbh->begin_work() if($dbh->{AutoCommit});}; eval { push(@averageUpd, {%hourlyKnown}) if($day != 00); - + $c = 0; for my $hourHash (@averageUpd) { # Only count for logging... 
for my $hourKey (keys %$hourHash) { @@ -5415,10 +5420,10 @@ sub DbLog_reduceLog { my $i = 0; my $k = 1; my $th = ($c <= 2000)?100:($c <= 30000)?1000:10000; - + for my $hourHash (@averageUpd) { for my $hourKey (keys %$hourHash) { - if ($hourHash->{$hourKey}->[0]) { # true if reading is a number + if ($hourHash->{$hourKey}->[0]) { # true if reading is a number ($updDate,$updHour) = $hourHash->{$hourKey}->[0] =~ /(.*\d+)\s(\d{2}):/; if (scalar(@{$hourHash->{$hourKey}->[4]}) > 1) { # true if reading has multiple records this hour for (@{$hourHash->{$hourKey}->[4]}) { $sum += $_; } @@ -5426,20 +5431,20 @@ sub DbLog_reduceLog { $sum = 0; Log3($name, 5, "DbLog $name: UPDATE $history SET TIMESTAMP=$updDate $updHour:30:00, EVENT='rl_av_h', VALUE=$average WHERE DEVICE=$hourHash->{$hourKey}->[1] AND READING=$hourHash->{$hourKey}->[3] AND TIMESTAMP=$hourHash->{$hourKey}->[0] AND VALUE=$hourHash->{$hourKey}->[4]->[0]"); $sth_upd->execute(("$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[4]->[0])); - + $i++; if($i == $th) { - my $prog = $k * $i; + my $prog = $k * $i; Log3($name, 3, "DbLog $name: reduceLog (hourly-average) updating progress of day: $processingDay is: $prog"); $i = 0; $k++; - } + } push(@averageUpdD, ["$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); - } + } else { push(@averageUpdD, [$hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[2], $hourHash->{$hourKey}->[4]->[0], $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); } - } + } } } }; @@ -5448,16 +5453,16 @@ sub DbLog_reduceLog { Log3($hash->{NAME}, 2, "DbLog $name - reduceLogNbl ! FAILED ! 
for day $processingDay: $err"); eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; @averageUpdD = (); - } + } else { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; } - - $dbh->{RaiseError} = 0; + + $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; @averageUpd = (); } - + if (defined($a[3]) && $a[3] =~ /average=day/i && scalar(@averageUpdD) && $day != 00) { $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; @@ -5468,71 +5473,71 @@ sub DbLog_reduceLog { $averageHash{$_->[3].$_->[4]}->{sum} += $_->[2]; $averageHash{$_->[3].$_->[4]}->{date} = $_->[5]; } - + $c = 0; for (keys %averageHash) { if(scalar @{$averageHash{$_}->{tedr}} == 1) { delete $averageHash{$_}; - } + } else { $c += (scalar(@{$averageHash{$_}->{tedr}}) - 1); } } $deletedCount += $c; $updateCount += keys(%averageHash); - + my ($id,$iu) = (0,0); my ($kd,$ku) = (1,1); - my $thd = ($c <= 2000)?100:($c <= 30000) ? 1000 : 10000; + my $thd = ($c <= 2000)?100:($c <= 30000) ? 1000 : 10000; my $thu = ((keys %averageHash) <= 2000) ? 100 : ((keys %averageHash) <= 30000) ? 
1000 : - 10000; - + 10000; + Log3($name, 3, "DbLog $name: reduceLog (daily-average) updating ".(keys %averageHash).", deleting $c records of day: $processingDay") if(keys %averageHash); - + for my $reading (keys %averageHash) { $average = sprintf('%.3f', $averageHash{$reading}->{sum}/scalar(@{$averageHash{$reading}->{tedr}})); $lastUpdH = pop @{$averageHash{$reading}->{tedr}}; - + for (@{$averageHash{$reading}->{tedr}}) { Log3($name, 5, "DbLog $name: DELETE FROM $history WHERE DEVICE='$_->[2]' AND READING='$_->[3]' AND TIMESTAMP='$_->[0]'"); $sth_delD->execute(($_->[2], $_->[3], $_->[0])); - + $id++; if($id == $thd) { - my $prog = $kd * $id; + my $prog = $kd * $id; Log3($name, 3, "DbLog $name: reduceLog (daily-average) deleting progress of day: $processingDay is: $prog"); $id = 0; $kd++; } } - + Log3($name, 5, "DbLog $name: UPDATE $history SET TIMESTAMP=$averageHash{$reading}->{date} 12:00:00, EVENT='rl_av_d', VALUE=$average WHERE (DEVICE=$lastUpdH->[2]) AND (READING=$lastUpdH->[3]) AND (TIMESTAMP=$lastUpdH->[0])"); - + $sth_updD->execute(($averageHash{$reading}->{date}." 12:00:00", 'rl_av_d', $average, $lastUpdH->[2], $lastUpdH->[3], $lastUpdH->[0])); - + $iu++; if($iu == $thu) { - my $prog = $ku * $id; + my $prog = $ku * $id; Log3($name, 3, "DbLog $name: reduceLog (daily-average) updating progress of day: $processingDay is: $prog"); $iu = 0; $ku++; - } + } } }; if ($@) { $err = $@; Log3($hash->{NAME}, 2, "DbLog $name - reduceLogNbl ! FAILED ! 
for day $processingDay: $err"); eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; - } + } else { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; } - - $dbh->{RaiseError} = 0; + + $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; } - + %averageHash = (); %hourlyKnown = (); @averageUpd = (); @@ -5541,16 +5546,16 @@ sub DbLog_reduceLog { } $currentDay = $day; } - + if ($hour != $currentHour) { # forget records from last hour, but remember these for average if (defined($a[3]) && $a[3] =~ /average/i && keys(%hourlyKnown)) { push(@averageUpd, {%hourlyKnown}); } - + %hourlyKnown = (); $currentHour = $hour; } - + if (defined $hourlyKnown{$row->[1].$row->[3]}) { # remember first readings for device per h, other can be deleted push(@dayRows, [@$row]); if (defined($a[3]) && $a[3] =~ /average/i && defined($row->[4]) && $row->[4] =~ /^-?(?:\d+(?:\.\d*)?|\.\d+)$/ && $hourlyKnown{$row->[1].$row->[3]}->[0]) { @@ -5558,36 +5563,36 @@ sub DbLog_reduceLog { push(@{$hourlyKnown{$row->[1].$row->[3]}->[4]}, $row->[4]); } } - } + } else { $exclude = 0; for (@excludeRegex) { $exclude = 1 if("$row->[1]:$row->[3]" =~ /^$_$/); } - + if ($exclude) { $excludeCount++ if($day != 00); - } + } else { $hourlyKnown{$row->[1].$row->[3]} = (defined($row->[4]) && $row->[4] =~ /^-?(?:\d+(?:\.\d*)?|\.\d+)$/) ? [$row->[0],$row->[1],$row->[2],$row->[3],[$row->[4]]] : [0]; } } - + $processingDay = (split(' ',$row->[0]))[0]; - + } while ($day != 00); - + my $result = "Rows processed: $rowCount, deleted: $deletedCount" .((defined($a[3]) && $a[3] =~ /average/i)? ", updated: $updateCount" : '') .(($excludeCount)? ", excluded: $excludeCount" : '') .", time: ".sprintf('%.2f',time() - $startTime)."sec"; - + Log3($name, 3, "DbLog $name: reduceLog executed. $result"); - + readingsSingleUpdate($hash,"reduceLogState",$result,1); $ret = "reduceLog executed. 
$result"; } - $dbh->disconnect(); + $dbh->disconnect(); return $ret; } @@ -5604,22 +5609,22 @@ sub DbLog_reduceLogNbl { my $utf8 = defined($hash->{UTF8})?$hash->{UTF8}:0; my $history = $hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; - + delete $hash->{HELPER}{REDUCELOG}; - + my ($ret,$row,$filter,$exclude,$c,$day,$hour,$lastHour,$updDate,$updHour,$average,$processingDay,$lastUpdH,%hourlyKnown,%averageHash,@excludeRegex,@dayRows,@averageUpd,@averageUpdD); my ($startTime,$currentHour,$currentDay,$deletedCount,$updateCount,$sum,$rowCount,$excludeCount) = (time(),99,0,0,0,0,0,0); my ($dbh,$err); - + Log3 ($name, 5, "DbLog $name -> Start DbLog_reduceLogNbl"); - + my ($useac,$useta) = DbLog_commitMode($hash); if (!$useac) { eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, AutoCommit => 0 });}; - } + } elsif ($useac == 1) { eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, AutoCommit => 1 });}; - } + } else { # Server default eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1 });}; @@ -5630,47 +5635,47 @@ sub DbLog_reduceLogNbl { Log3 ($name, 5, "DbLog $name -> DbLog_reduceLogNbl finished"); return "$name|''|$err"; } - + if ($a[-1] =~ /^EXCLUDE=(.+:.+)+/i) { ($filter) = $a[-1] =~ /^EXCLUDE=(.+)/i; @excludeRegex = split(',',$filter); - } + } elsif ($a[-1] =~ /^INCLUDE=.+:.+$/i) { $filter = 1; } - + if (defined($a[3])) { $average = ($a[3] =~ /average=day/i) ? "AVERAGE=DAY" : ($a[3] =~ /average/i) ? "AVERAGE=HOUR" : 0; } - + Log3($name, 3, "DbLog $name: reduceLogNbl requested with DAYS=$a[2]" .(($average || $filter) ? ', ' : '').(($average) ? "$average" : '') .(($average && $filter) ? ", " : '').(($filter) ? uc((split('=',$a[-1]))[0]).'='.(split('=',$a[-1]))[1] : '')); - + my $ac = ($dbh->{AutoCommit}) ? "ON" : "OFF"; my $tm = ($useta) ? 
"ON" : "OFF"; - + Log3 $hash->{NAME}, 4, "DbLog $name -> AutoCommit mode: $ac, Transaction mode: $tm"; my ($od,$nd) = split(":",$a[2]); # $od - Tage älter als , $nd - Tage neuer als my ($ots,$nts); - - if ($hash->{MODEL} eq 'SQLITE') { + + if ($hash->{MODEL} eq 'SQLITE') { $ots = "datetime('now', '-$od days')"; $nts = "datetime('now', '-$nd days')" if($nd); - } - elsif ($hash->{MODEL} eq 'MYSQL') { - $ots = "DATE_SUB(CURDATE(),INTERVAL $od DAY)"; - $nts = "DATE_SUB(CURDATE(),INTERVAL $nd DAY)" if($nd); - } - elsif ($hash->{MODEL} eq 'POSTGRESQL') { - $ots = "NOW() - INTERVAL '$od' DAY"; - $nts = "NOW() - INTERVAL '$nd' DAY" if($nd); - } - else { - $ret = 'Unknown database type.'; } - + elsif ($hash->{MODEL} eq 'MYSQL') { + $ots = "DATE_SUB(CURDATE(),INTERVAL $od DAY)"; + $nts = "DATE_SUB(CURDATE(),INTERVAL $nd DAY)" if($nd); + } + elsif ($hash->{MODEL} eq 'POSTGRESQL') { + $ots = "NOW() - INTERVAL '$od' DAY"; + $nts = "NOW() - INTERVAL '$nd' DAY" if($nd); + } + else { + $ret = 'Unknown database type.'; + } + if ($ots) { my ($sth_del, $sth_upd, $sth_delD, $sth_updD, $sth_get); eval { $sth_del = $dbh->prepare_cached("DELETE FROM $history WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) 
AND (VALUE=?)"); @@ -5687,7 +5692,7 @@ sub DbLog_reduceLogNbl { Log3 ($name, 5, "DbLog $name -> DbLog_reduceLogNbl finished"); return "$name|''|$err"; } - + eval { $sth_get->execute(); }; if ($@) { $err = encode_base64($@,""); @@ -5695,48 +5700,48 @@ sub DbLog_reduceLogNbl { Log3 ($name, 5, "DbLog $name -> DbLog_reduceLogNbl finished"); return "$name|''|$err"; } - + do { $row = $sth_get->fetchrow_arrayref || ['0000-00-00 00:00:00','D','','R','V']; # || execute last-day dummy $ret = 1; ($day,$hour) = $row->[0] =~ /-(\d{2})\s(\d{2}):/; $rowCount++ if($day != 00); - + if ($day != $currentDay) { if ($currentDay) { # false on first executed day if (scalar @dayRows) { ($lastHour) = $dayRows[-1]->[0] =~ /(.*\d+\s\d{2}):/; $c = 0; - + for my $delRow (@dayRows) { $c++ if($day != 00 || $delRow->[0] !~ /$lastHour/); } - + if($c) { $deletedCount += $c; - + Log3($name, 3, "DbLog $name: reduceLogNbl deleting $c records of day: $processingDay"); - + $dbh->{RaiseError} = 1; - $dbh->{PrintError} = 0; + $dbh->{PrintError} = 0; eval {$dbh->begin_work() if($dbh->{AutoCommit});}; - + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } - + eval { my $i = 0; my $k = 1; my $th = ($#dayRows <= 2000)?100:($#dayRows <= 30000)?1000:10000; - + for my $delRow (@dayRows) { if($day != 00 || $delRow->[0] !~ /$lastHour/) { Log3($name, 4, "DbLog $name: DELETE FROM $history WHERE (DEVICE=$delRow->[1]) AND (READING=$delRow->[3]) AND (TIMESTAMP=$delRow->[0]) AND (VALUE=$delRow->[4])"); $sth_del->execute(($delRow->[1], $delRow->[3], $delRow->[0], $delRow->[4])); $i++; if($i == $th) { - my $prog = $k * $i; + my $prog = $k * $i; Log3($name, 3, "DbLog $name: reduceLogNbl deletion progress of day: $processingDay is: $prog"); $i = 0; $k++; @@ -5752,76 +5757,76 @@ sub DbLog_reduceLogNbl { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } $ret = 0; - } + } else { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } 
} - - $dbh->{RaiseError} = 0; + + $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; } - + @dayRows = (); } - + if ($ret && defined($a[3]) && $a[3] =~ /average/i) { $dbh->{RaiseError} = 1; - $dbh->{PrintError} = 0; + $dbh->{PrintError} = 0; eval {$dbh->begin_work() if($dbh->{AutoCommit});}; if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } - + eval { push(@averageUpd, {%hourlyKnown}) if($day != 00); - + $c = 0; for my $hourHash (@averageUpd) { # Only count for logging... for my $hourKey (keys %$hourHash) { $c++ if ($hourHash->{$hourKey}->[0] && scalar(@{$hourHash->{$hourKey}->[4]}) > 1); } } - + $updateCount += $c; Log3($name, 3, "DbLog $name: reduceLogNbl (hourly-average) updating $c records of day: $processingDay") if($c); # else only push to @averageUpdD - + my $i = 0; my $k = 1; my $th = ($c <= 2000)?100:($c <= 30000)?1000:10000; - + for my $hourHash (@averageUpd) { for my $hourKey (keys %$hourHash) { - if ($hourHash->{$hourKey}->[0]) { # true if reading is a number + if ($hourHash->{$hourKey}->[0]) { # true if reading is a number ($updDate,$updHour) = $hourHash->{$hourKey}->[0] =~ /(.*\d+)\s(\d{2}):/; if (scalar(@{$hourHash->{$hourKey}->[4]}) > 1) { # true if reading has multiple records this hour for (@{$hourHash->{$hourKey}->[4]}) { $sum += $_; } $average = sprintf('%.3f', $sum/scalar(@{$hourHash->{$hourKey}->[4]}) ); $sum = 0; - + Log3($name, 4, "DbLog $name: UPDATE $history SET TIMESTAMP=$updDate $updHour:30:00, EVENT='rl_av_h', VALUE=$average WHERE DEVICE=$hourHash->{$hourKey}->[1] AND READING=$hourHash->{$hourKey}->[3] AND TIMESTAMP=$hourHash->{$hourKey}->[0] AND VALUE=$hourHash->{$hourKey}->[4]->[0]"); - + $sth_upd->execute(("$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[4]->[0])); - + $i++; if($i == $th) { - my $prog = $k * $i; + my $prog = $k * $i; Log3($name, 3, "DbLog $name: reduceLogNbl (hourly-average) updating progress of 
day: $processingDay is: $prog"); $i = 0; $k++; - } + } push(@averageUpdD, ["$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); - } + } else { push(@averageUpdD, [$hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[2], $hourHash->{$hourKey}->[4]->[0], $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); } - } + } } } }; - + if ($@) { $err = $@; Log3($hash->{NAME}, 2, "DbLog $name - reduceLogNbl average=hour ! FAILED ! for day $processingDay: $err"); @@ -5830,19 +5835,19 @@ sub DbLog_reduceLogNbl { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } @averageUpdD = (); - } + } else { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); - } + } } - - $dbh->{RaiseError} = 0; + + $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; @averageUpd = (); } - + if (defined($a[3]) && $a[3] =~ /average=day/i && scalar(@averageUpdD) && $day != 00) { $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; @@ -5850,65 +5855,65 @@ sub DbLog_reduceLogNbl { if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } - + eval { for (@averageUpdD) { push(@{$averageHash{$_->[3].$_->[4]}->{tedr}}, [$_->[0], $_->[1], $_->[3], $_->[4]]); $averageHash{$_->[3].$_->[4]}->{sum} += $_->[2]; $averageHash{$_->[3].$_->[4]}->{date} = $_->[5]; } - + $c = 0; for (keys %averageHash) { if(scalar @{$averageHash{$_}->{tedr}} == 1) { delete $averageHash{$_}; - } + } else { $c += (scalar(@{$averageHash{$_}->{tedr}}) - 1); } } $deletedCount += $c; $updateCount += keys(%averageHash); - + my ($id,$iu) = (0,0); my ($kd,$ku) = (1,1); - my $thd = ($c <= 2000) ? 100 : - ($c <= 30000) ? 1000 : + my $thd = ($c <= 2000) ? 100 : + ($c <= 30000) ? 1000 : 10000; - my $thu = ((keys %averageHash) <= 2000) ? 100 : + my $thu = ((keys %averageHash) <= 2000) ? 
100 : ((keys %averageHash) <= 30000) ? 1000 : 10000; - + Log3($name, 3, "DbLog $name: reduceLogNbl (daily-average) updating ".(keys %averageHash).", deleting $c records of day: $processingDay") if(keys %averageHash); - + for my $reading (keys %averageHash) { $average = sprintf('%.3f', $averageHash{$reading}->{sum}/scalar(@{$averageHash{$reading}->{tedr}})); $lastUpdH = pop @{$averageHash{$reading}->{tedr}}; - + for (@{$averageHash{$reading}->{tedr}}) { Log3($name, 5, "DbLog $name: DELETE FROM $history WHERE DEVICE='$_->[2]' AND READING='$_->[3]' AND TIMESTAMP='$_->[0]'"); $sth_delD->execute(($_->[2], $_->[3], $_->[0])); - + $id++; if($id == $thd) { - my $prog = $kd * $id; + my $prog = $kd * $id; Log3($name, 3, "DbLog $name: reduceLogNbl (daily-average) deleting progress of day: $processingDay is: $prog"); $id = 0; $kd++; } } - + Log3($name, 4, "DbLog $name: UPDATE $history SET TIMESTAMP=$averageHash{$reading}->{date} 12:00:00, EVENT='rl_av_d', VALUE=$average WHERE (DEVICE=$lastUpdH->[2]) AND (READING=$lastUpdH->[3]) AND (TIMESTAMP=$lastUpdH->[0])"); - + $sth_updD->execute(($averageHash{$reading}->{date}." 
12:00:00", 'rl_av_d', $average, $lastUpdH->[2], $lastUpdH->[3], $lastUpdH->[0])); - + $iu++; if($iu == $thu) { - my $prog = $ku * $id; + my $prog = $ku * $id; Log3($name, 3, "DbLog $name: reduceLogNbl (daily-average) updating progress of day: $processingDay is: $prog"); $iu = 0; $ku++; - } + } } }; if ($@) { @@ -5917,28 +5922,28 @@ sub DbLog_reduceLogNbl { if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } - } + } else { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } } - - $dbh->{RaiseError} = 0; + + $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; } - + %averageHash = (); %hourlyKnown = (); @averageUpd = (); @averageUpdD = (); $currentHour = 99; } - + $currentDay = $day; } - + if ($hour != $currentHour) { # forget records from last hour, but remember these for average if (defined($a[3]) && $a[3] =~ /average/i && keys(%hourlyKnown)) { push(@averageUpd, {%hourlyKnown}); @@ -5953,39 +5958,39 @@ sub DbLog_reduceLogNbl { push(@{$hourlyKnown{$row->[1].$row->[3]}->[4]}, $row->[4]); } } - } + } else { $exclude = 0; for (@excludeRegex) { $exclude = 1 if("$row->[1]:$row->[3]" =~ /^$_$/); } - + if ($exclude) { $excludeCount++ if($day != 00); - } + } else { $hourlyKnown{$row->[1].$row->[3]} = (defined($row->[4]) && $row->[4] =~ /^-?(?:\d+(?:\.\d*)?|\.\d+)$/) ? [$row->[0],$row->[1],$row->[2],$row->[3],[$row->[4]]] : [0]; } } $processingDay = (split(' ',$row->[0]))[0]; - + } while( $day != 00 ); - + my $result = "Rows processed: $rowCount, deleted: $deletedCount" .((defined($a[3]) && $a[3] =~ /average/i)? ", updated: $updateCount" : '') .(($excludeCount)? ", excluded: $excludeCount" : '') .", time: ".sprintf('%.2f',time() - $startTime)."sec"; - + Log3($name, 3, "DbLog $name: reduceLogNbl finished. $result"); - + $ret = $result; $ret = "reduceLogNbl finished. 
$result"; } - + $dbh->disconnect(); $ret = encode_base64($ret,""); Log3 ($name, 5, "DbLog $name -> DbLog_reduceLogNbl finished"); - + return "$name|$ret|0"; } @@ -6000,11 +6005,11 @@ sub DbLog_reduceLogNbl_finished { my $ret = decode_base64($a[1]); my $err; $err = decode_base64($a[2]) if ($a[2]); - + readingsSingleUpdate($hash,"reduceLogState", $err // $ret, 1); - + delete $hash->{HELPER}{REDUCELOG_PID}; - + return; } @@ -6017,15 +6022,15 @@ sub DbLog_countNbl { my $history = $hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; my ($cc,$hc,$bst,$st,$rt); - + # Background-Startzeit $bst = [gettimeofday]; - + my $dbh = DbLog_ConnectNewDBH($hash); if (!$dbh) { my $err = encode_base64("DbLog $name: DBLog_Set - count - DB connect not possible",""); return "$name|0|0|$err|0"; - } + } else { Log3 $name,4,"DbLog $name: Records count requested."; # SQL-Startzeit @@ -6036,7 +6041,7 @@ sub DbLog_countNbl { # SQL-Laufzeit ermitteln $rt = tv_interval($st); } - + # Background-Laufzeit ermitteln my $brt = tv_interval($bst); $rt = $rt.",".$brt; @@ -6055,22 +6060,22 @@ sub DbLog_countNbl_finished { my $hc = $a[2]; my ($err,$bt); $err = decode_base64($a[3]) if($a[3]); - $bt = $a[4] if($a[4]); + $bt = $a[4] if($a[4]); DbLog_setReadingstate ($hash, $err) if($err); readingsSingleUpdate ($hash,"countHistory",$hc,1) if ($hc); readingsSingleUpdate ($hash,"countCurrent",$cc,1) if ($cc); - + if(AttrVal($name, "showproctime", undef) && $bt) { my ($rt,$brt) = split(",", $bt); readingsBeginUpdate ($hash); - readingsBulkUpdate ($hash, "background_processing_time", sprintf("%.4f",$brt)); + readingsBulkUpdate ($hash, "background_processing_time", sprintf("%.4f",$brt)); readingsBulkUpdate ($hash, "sql_processing_time", sprintf("%.4f",$rt) ); readingsEndUpdate ($hash, 1); } - + delete $hash->{HELPER}{COUNT_PID}; - + return; } @@ -6087,19 +6092,19 @@ sub DbLog_deldaysNbl { my $history = $hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; my 
($cmd,$dbh,$rows,$error,$sth,$ret,$bst,$brt,$st,$rt); - + Log3 ($name, 5, "DbLog $name -> Start DbLog_deldaysNbl $days"); - + # Background-Startzeit $bst = [gettimeofday]; my ($useac,$useta) = DbLog_commitMode($hash); if(!$useac) { eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, AutoInactiveDestroy => 1 });}; - } + } elsif($useac == 1) { eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, AutoInactiveDestroy => 1 });}; - } + } else { # Server default eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1 });}; @@ -6108,36 +6113,36 @@ sub DbLog_deldaysNbl { $error = encode_base64($@,""); Log3 ($name, 2, "DbLog $name - Error: $@"); Log3 ($name, 5, "DbLog $name -> DbLog_deldaysNbl finished"); - return "$name|0|0|$error"; + return "$name|0|0|$error"; } - + my $ac = ($dbh->{AutoCommit})?"ON":"OFF"; my $tm = ($useta)?"ON":"OFF"; Log3 $hash->{NAME}, 4, "DbLog $name -> AutoCommit mode: $ac, Transaction mode: $tm"; - + $cmd = "delete from $history where TIMESTAMP < "; - if ($hash->{MODEL} eq 'SQLITE') { - $cmd .= "datetime('now', '-$days days')"; - } - elsif ($hash->{MODEL} eq 'MYSQL') { - $cmd .= "DATE_SUB(CURDATE(),INTERVAL $days DAY)"; - } - elsif ($hash->{MODEL} eq 'POSTGRESQL') { - $cmd .= "NOW() - INTERVAL '$days' DAY"; - } - else { + if ($hash->{MODEL} eq 'SQLITE') { + $cmd .= "datetime('now', '-$days days')"; + } + elsif ($hash->{MODEL} eq 'MYSQL') { + $cmd .= "DATE_SUB(CURDATE(),INTERVAL $days DAY)"; + } + elsif ($hash->{MODEL} eq 'POSTGRESQL') { + $cmd .= "NOW() - INTERVAL '$days' DAY"; + } + else { $ret = 'Unknown database type. 
Maybe you can try userCommand anyway.'; $error = encode_base64($ret,""); Log3 ($name, 2, "DbLog $name - Error: $ret"); Log3 ($name, 5, "DbLog $name -> DbLog_deldaysNbl finished"); - return "$name|0|0|$error"; + return "$name|0|0|$error"; } - + # SQL-Startzeit $st = [gettimeofday]; - - eval { - $sth = $dbh->prepare($cmd); + + eval { + $sth = $dbh->prepare($cmd); $sth->execute(); }; @@ -6146,21 +6151,21 @@ sub DbLog_deldaysNbl { Log3 ($name, 2, "DbLog $name - $@"); $dbh->disconnect; Log3 ($name, 4, "DbLog $name -> BlockingCall DbLog_deldaysNbl finished"); - return "$name|0|0|$error"; - } + return "$name|0|0|$error"; + } else { $rows = $sth->rows; $dbh->commit() if(!$dbh->{AutoCommit}); $dbh->disconnect; - } + } $rt = tv_interval($st); # SQL-Laufzeit ermitteln $brt = tv_interval($bst); # Background-Laufzeit ermitteln $rt = $rt.",".$brt; - + Log3 ($name, 5, "DbLog $name -> DbLog_deldaysNbl finished"); -return "$name|$rows|$rt|0"; +return "$name|$rows|$rt|0"; } ######################################################################################### @@ -6173,22 +6178,22 @@ sub DbLog_deldaysNbl_done { my $hash = $defs{$name}; my $rows = $a[1]; my($bt,$err); - $bt = $a[2] if ($a[2]); + $bt = $a[2] if ($a[2]); $err = decode_base64($a[3]) if ($a[3]); - + Log3 ($name, 5, "DbLog $name -> Start DbLog_deldaysNbl_done"); - + if ($err) { DbLog_setReadingstate ($hash, $err); delete $hash->{HELPER}{DELDAYS_PID}; Log3 ($name, 5, "DbLog $name -> DbLog_deldaysNbl_done finished"); return; - } + } else { if(AttrVal($name, "showproctime", undef) && $bt) { my ($rt,$brt) = split(",", $bt); readingsBeginUpdate($hash); - readingsBulkUpdate ($hash, "background_processing_time", sprintf("%.4f",$brt)); + readingsBulkUpdate ($hash, "background_processing_time", sprintf("%.4f",$brt)); readingsBulkUpdate ($hash, "sql_processing_time", sprintf("%.4f",$rt)); readingsEndUpdate ($hash, 1); } @@ -6198,7 +6203,7 @@ sub DbLog_deldaysNbl_done { Log3 ($name, 3, "DbLog $name -> deleteOldDaysNbl finished. 
$rows entries of database $db deleted."); delete $hash->{HELPER}{DELDAYS_PID}; Log3 ($name, 5, "DbLog $name -> DbLog_deldaysNbl_done finished"); - + return; } @@ -6215,7 +6220,7 @@ sub DbLog_setinternalcols { $hash->{HELPER}{READINGCOL} = AttrVal($name, "colReading", $DbLog_columns{READING}); $hash->{HELPER}{VALUECOL} = AttrVal($name, "colValue", $DbLog_columns{VALUE} ); $hash->{HELPER}{UNITCOL} = $DbLog_columns{UNIT}; - + $hash->{COLUMNS} = "field length used for Device: $hash->{HELPER}{DEVICECOL}, Type: $hash->{HELPER}{TYPECOL}, Event: $hash->{HELPER}{EVENTCOL}, Reading: $hash->{HELPER}{READINGCOL}, Value: $hash->{HELPER}{VALUECOL}, Unit: $hash->{HELPER}{UNITCOL} "; # Statusbit "Columns sind gesetzt" @@ -6231,9 +6236,9 @@ sub DbLog_reopen { my $hash = shift; my $name = $hash->{NAME}; my $async = AttrVal($name, "asyncMode", undef); - + RemoveInternalTimer($hash, "DbLog_reopen"); - + if(DbLog_ConnectPush($hash)) { # Statusbit "Kein Schreiben in DB erlauben" löschen my $delay = delete $hash->{HELPER}{REOPEN_RUNS}; @@ -6242,11 +6247,11 @@ sub DbLog_reopen { DbLog_setReadingstate ($hash, "reopened"); DbLog_execmemcache ($hash) if($async); - } - else { - InternalTimer(gettimeofday()+30, "DbLog_reopen", $hash, 0); } - + else { + InternalTimer(gettimeofday()+30, "DbLog_reopen", $hash, 0); + } + return; } @@ -6262,7 +6267,7 @@ sub DbLog_checkUsePK { my $upkh = 0; my $upkc = 0; my (@pkh,@pkc); - + my $db = (split("=",(split(";",$dbconn))[0]))[1]; eval {@pkh = $dbh->primary_key( undef, undef, 'history' );}; eval {@pkc = $dbh->primary_key( undef, undef, 'current' );}; @@ -6304,14 +6309,14 @@ sub DbLog_sampleDataFn { my $desc = "Device:Reading"; my $hash = $defs{$dlName}; my $current = $hash->{HELPER}{TC}; - + my @htmlArr; my @example; my @colregs; my $counter; - - my $currentPresent = AttrVal($dlName,'DbLogType','History'); - + + my $currentPresent = AttrVal($dlName,'DbLogType','History'); + my $dbhf = DbLog_ConnectNewDBH($defs{$dlName}); return if(!$dbhf); @@ -6319,18 
+6324,18 @@ sub DbLog_sampleDataFn { # avoids fhem from crash if table 'current' is not present and attr DbLogType is set to /Current/ my $prescurr = eval {$dbhf->selectrow_array("select count(*) from $current");} || 0; Log3($dlName, 5, "DbLog $dlName: Table $current present : $prescurr (0 = not present or no content)"); - + if($currentPresent =~ m/Current|SampleFill/ && $prescurr) { # Table Current present, use it for sample data my $query = "select device,reading from $current where device <> '' group by device,reading"; - my $sth = $dbhf->prepare( $query ); + my $sth = $dbhf->prepare( $query ); $sth->execute(); while (my @line = $sth->fetchrow_array()) { $counter++; push (@example, join (" ",@line)) if($counter <= 8); # show max 8 examples push (@colregs, "$line[0]:$line[1]"); # push all eventTypes to selection list } - $dbhf->disconnect(); + $dbhf->disconnect(); my $cols = join(",", sort { "\L$a" cmp "\L$b" } @colregs); # $max = 8 if($max > 8); # auskommentiert 27.02.2018, Notwendigkeit unklar (forum:#76008) @@ -6344,7 +6349,7 @@ sub DbLog_sampleDataFn { push @htmlArr, $ret; } - } + } else { # Table Current not present, so create an empty input field push @example, "No sample data due to missing table '$current'"; @@ -6354,7 +6359,7 @@ sub DbLog_sampleDataFn { my @f = split(":", ($dlog->[$r] ? $dlog->[$r] : ":::"), 4); my $ret = ""; no warnings 'uninitialized'; # Forum:74690, bug unitialized - $ret .= SVG_txt("par_${r}_0", "", "$f[0]:$f[1]:$f[2]:$f[3]", 20); + $ret .= SVG_txt("par_${r}_0", "", "$f[0]:$f[1]:$f[2]:$f[3]", 20); use warnings; # $ret .= SVG_txt("par_${r}_2", "", $f[2], 1); # Default not yet implemented # $ret .= SVG_txt("par_${r}_3", "", $f[3], 3); # Function @@ -6371,9 +6376,9 @@ return ($desc, \@htmlArr, join("
", @example)); # Error handling, returns a JSON String ################################################################ sub DbLog_jsonError { - my $errormsg = $_[0]; + my $errormsg = $_[0]; my $json = '{"success": "false", "msg":"'.$errormsg.'"}'; - + return $json; } @@ -6381,21 +6386,21 @@ return $json; # Check Zeitformat # Zeitformat: YYYY-MM-DD HH:MI:SS ################################################################ -sub DbLog_checkTimeformat { +sub DbLog_checkTimeformat { my ($t) = @_; - + my (@datetime, @date, @time); @datetime = split(" ", $t); # Datum und Zeit auftrennen @date = split("-", $datetime[0]); @time = split(":", $datetime[1]); - + eval { timelocal($time[2], $time[1], $time[0], $date[2], $date[1]-1, $date[0]-1900); }; - + if ($@) { my $err = (split(" at ", $@))[0]; return $err; } - + return; } @@ -6410,12 +6415,12 @@ sub DbLog_prepareSql { $endtime =~ s/_/ /; my $device = $_[7]; my $userquery = $_[8]; - my $xaxis = $_[9]; - my $yaxis = $_[10]; - my $savename = $_[11]; + my $xaxis = $_[9]; + my $yaxis = $_[10]; + my $savename = $_[11]; my $jsonChartConfig = $_[12]; - my $pagingstart = $_[13]; - my $paginglimit = $_[14]; + my $pagingstart = $_[13]; + my $paginglimit = $_[14]; my $dbmodel = $hash->{MODEL}; my $history = $hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; @@ -6452,7 +6457,7 @@ sub DbLog_prepareSql { $yearstats .= "AVG(VALUE::float) AS AVG, MIN(VALUE::float) AS MIN, MAX(VALUE::float) AS MAX, "; $yearstats .= "COUNT(VALUE) AS COUNT FROM $history WHERE READING = '$yaxis' AND DEVICE = '$device' "; $yearstats .= "AND TIMESTAMP Between '$starttime' AND '$endtime' GROUP BY 1 ORDER BY 1;"; - + } elsif ($dbmodel eq "MYSQL") { ### MYSQL Queries for Statistics ### ### hour: @@ -6493,7 +6498,7 @@ sub DbLog_prepareSql { $hourstats .= "MIN(CAST(VALUE AS FLOAT)) AS MIN, MAX(CAST(VALUE AS FLOAT)) AS MAX, COUNT(VALUE) AS COUNT "; $hourstats .= "FROM $history WHERE READING = '$yaxis' AND DEVICE = '$device' "; $hourstats .= "AND TIMESTAMP Between 
'$starttime' AND '$endtime' GROUP BY strftime('%Y-%m-%d %H:00:00', TIMESTAMP);"; - + ### day: $daystats = "SELECT TIMESTAMP, SUM(CAST(VALUE AS FLOAT)) AS SUM, AVG(CAST(VALUE AS FLOAT)) AS AVG, "; $daystats .= "MIN(CAST(VALUE AS FLOAT)) AS MIN, MAX(CAST(VALUE AS FLOAT)) AS MAX, COUNT(VALUE) AS COUNT "; @@ -6518,74 +6523,74 @@ sub DbLog_prepareSql { $yearstats .= "FROM $history WHERE READING = '$yaxis' AND DEVICE = '$device' "; $yearstats .= "AND TIMESTAMP Between '$starttime' AND '$endtime' GROUP BY strftime('%Y 00:00:00', TIMESTAMP);"; - } + } else { $sql = "errordb"; } if($userquery eq "getreadings") { $sql = "SELECT distinct(reading) FROM $history WHERE device = '".$device."'"; - } + } elsif($userquery eq "getdevices") { $sql = "SELECT distinct(device) FROM $history"; - } + } elsif($userquery eq "timerange") { $sql = "SELECT ".$xaxis.", VALUE FROM $history WHERE READING = '$yaxis' AND DEVICE = '$device' AND TIMESTAMP Between '$starttime' AND '$endtime' ORDER BY TIMESTAMP;"; - } + } elsif($userquery eq "hourstats") { $sql = $hourstats; - } + } elsif($userquery eq "daystats") { $sql = $daystats; - } + } elsif($userquery eq "weekstats") { $sql = $weekstats; - } + } elsif($userquery eq "monthstats") { $sql = $monthstats; - } + } elsif($userquery eq "yearstats") { $sql = $yearstats; - } + } elsif($userquery eq "savechart") { $sql = "INSERT INTO frontend (TYPE, NAME, VALUE) VALUES ('savedchart', '$savename', '$jsonChartConfig')"; - } + } elsif($userquery eq "renamechart") { $sql = "UPDATE frontend SET NAME = '$savename' WHERE ID = '$jsonChartConfig'"; - } + } elsif($userquery eq "deletechart") { $sql = "DELETE FROM frontend WHERE TYPE = 'savedchart' AND ID = '".$savename."'"; - } + } elsif($userquery eq "updatechart") { $sql = "UPDATE frontend SET VALUE = '$jsonChartConfig' WHERE ID = '".$savename."'"; - } + } elsif($userquery eq "getcharts") { $sql = "SELECT * FROM frontend WHERE TYPE = 'savedchart'"; - } + } elsif($userquery eq "getTableData") { if ($device ne '""' 
&& $yaxis ne '""') { $sql = "SELECT * FROM $history WHERE READING = '$yaxis' AND DEVICE = '$device' "; $sql .= "AND TIMESTAMP Between '$starttime' AND '$endtime'"; - $sql .= " LIMIT '$paginglimit' OFFSET '$pagingstart'"; - $countsql = "SELECT count(*) FROM $history WHERE READING = '$yaxis' AND DEVICE = '$device' "; - $countsql .= "AND TIMESTAMP Between '$starttime' AND '$endtime'"; - } - elsif($device ne '""' && $yaxis eq '""') { + $sql .= " LIMIT '$paginglimit' OFFSET '$pagingstart'"; + $countsql = "SELECT count(*) FROM $history WHERE READING = '$yaxis' AND DEVICE = '$device' "; + $countsql .= "AND TIMESTAMP Between '$starttime' AND '$endtime'"; + } + elsif($device ne '""' && $yaxis eq '""') { $sql = "SELECT * FROM $history WHERE DEVICE = '$device' "; $sql .= "AND TIMESTAMP Between '$starttime' AND '$endtime'"; $sql .= " LIMIT '$paginglimit' OFFSET '$pagingstart'"; $countsql = "SELECT count(*) FROM $history WHERE DEVICE = '$device' "; $countsql .= "AND TIMESTAMP Between '$starttime' AND '$endtime'"; - } + } else { $sql = "SELECT * FROM $history"; - $sql .= " WHERE TIMESTAMP Between '$starttime' AND '$endtime'"; + $sql .= " WHERE TIMESTAMP Between '$starttime' AND '$endtime'"; $sql .= " LIMIT '$paginglimit' OFFSET '$pagingstart'"; - $countsql = "SELECT count(*) FROM $history"; - $countsql .= " WHERE TIMESTAMP Between '$starttime' AND '$endtime'"; + $countsql = "SELECT count(*) FROM $history"; + $countsql .= " WHERE TIMESTAMP Between '$starttime' AND '$endtime'"; } return ($sql, $countsql); - } + } else { $sql = "error"; } @@ -6613,34 +6618,34 @@ sub DbLog_chartQuery { return if(!$dbhf); my $totalcount; - + if (defined $countsql && $countsql ne "") { - my $query_handle = $dbhf->prepare($countsql) + my $query_handle = $dbhf->prepare($countsql) or return DbLog_jsonError("Could not prepare statement: " . $dbhf->errstr . ", SQL was: " .$countsql); - - $query_handle->execute() + + $query_handle->execute() or return DbLog_jsonError("Could not execute statement: " . 
$query_handle->errstr); my @data = $query_handle->fetchrow_array(); $totalcount = join(", ", @data); - + } # prepare the query - my $query_handle = $dbhf->prepare($sql) + my $query_handle = $dbhf->prepare($sql) or return DbLog_jsonError("Could not prepare statement: " . $dbhf->errstr . ", SQL was: " .$sql); - + # execute the query - $query_handle->execute() + $query_handle->execute() or return DbLog_jsonError("Could not execute statement: " . $query_handle->errstr); - + my $columns = $query_handle->{'NAME'}; my $columncnt; # When columns are empty but execution was successful, we have done a successful INSERT, UPDATE or DELETE if($columns) { $columncnt = scalar @$columns; - } + } else { return '{"success": "true", "msg":"All ok"}'; } @@ -6652,44 +6657,44 @@ sub DbLog_chartQuery { if($i == 0) { $jsonstring .= '{'; - } + } else { $jsonstring .= ',{'; - } - + } + for ($i = 0; $i < $columncnt; $i++) { $jsonstring .= '"'; - $jsonstring .= uc($query_handle->{NAME}->[$i]); + $jsonstring .= uc($query_handle->{NAME}->[$i]); $jsonstring .= '":'; if (defined $data[$i]) { my $fragment = substr($data[$i],0,1); if ($fragment eq "{") { $jsonstring .= $data[$i]; - } + } else { $jsonstring .= '"'.$data[$i].'"'; } - } + } else { $jsonstring .= '""' } - + if($i != ($columncnt -1)) { - $jsonstring .= ','; + $jsonstring .= ','; } } - $jsonstring .= '}'; + $jsonstring .= '}'; } $dbhf->disconnect(); $jsonstring .= ']'; if (defined $totalcount && $totalcount ne "") { $jsonstring .= ',"totalCount": '.$totalcount.'}'; - } + } else { $jsonstring .= '}'; } - + return $jsonstring; } @@ -6701,25 +6706,25 @@ sub DbLog_dbReadings { my($hash,@a) = @_; my $history = $hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; - + my $dbhf = DbLog_ConnectNewDBH($hash); return if(!$dbhf); - + return 'Wrong Syntax for ReadingsVal!' 
unless defined($a[4]); my $DbLogType = AttrVal($a[0],'DbLogType','current'); my $query; if (lc($DbLogType) =~ m(current) ) { $query = "select VALUE,TIMESTAMP from $current where DEVICE= '$a[2]' and READING= '$a[3]'"; - } + } else { $query = "select VALUE,TIMESTAMP from $history where DEVICE= '$a[2]' and READING= '$a[3]' order by TIMESTAMP desc limit 1"; } my ($reading,$timestamp) = $dbhf->selectrow_array($query); - $dbhf->disconnect(); - + $dbhf->disconnect(); + $reading = (defined($reading)) ? $reading : $a[4]; $timestamp = (defined($timestamp)) ? $timestamp : $a[4]; - + return $reading if $a[1] eq 'ReadingsVal'; return $timestamp if $a[1] eq 'ReadingsTimestamp'; return "Syntax error: $a[1]"; @@ -6737,27 +6742,27 @@ sub DbLog_setVersionInfo { my $type = $hash->{TYPE}; $hash->{HELPER}{PACKAGE} = __PACKAGE__; $hash->{HELPER}{VERSION} = $v; - + if($modules{$type}{META}{x_prereqs_src} && !$hash->{HELPER}{MODMETAABSENT}) { # META-Daten sind vorhanden $modules{$type}{META}{version} = "v".$v; # Version aus META.json überschreiben, Anzeige mit {Dumper $modules{DbLog}{META}} if($modules{$type}{META}{x_version}) { # {x_version} ( nur gesetzt wenn $Id$ im Kopf komplett! vorhanden ) $modules{$type}{META}{x_version} =~ s/1\.1\.1/$v/xsg; - } + } else { - $modules{$type}{META}{x_version} = $v; + $modules{$type}{META}{x_version} = $v; } return $@ unless (FHEM::Meta::SetInternals($hash)); # FVERSION wird gesetzt ( nur gesetzt wenn $Id$ im Kopf komplett! 
vorhanden ) if(__PACKAGE__ eq "FHEM::$type" || __PACKAGE__ eq $type) { # es wird mit Packages gearbeitet -> Perl übliche Modulversion setzen # mit {->VERSION()} im FHEMWEB kann Modulversion abgefragt werden - use version 0.77; our $VERSION = FHEM::Meta::Get( $hash, 'version' ); + use version 0.77; our $VERSION = FHEM::Meta::Get( $hash, 'version' ); } - } + } else { # herkömmliche Modulstruktur $hash->{VERSION} = $v; } - + return; } @@ -6772,21 +6777,21 @@ sub DbLog_startShowChildhandles { my ($str) = @_; my ($name,$sub) = split(":",$str); my $hash = $defs{$name}; - - RemoveInternalTimer($hash, "DbLog_startShowChildhandles"); + + RemoveInternalTimer($hash, "DbLog_startShowChildhandles"); my $iv = AttrVal($name, "traceHandles", 0); return if(!$iv); - - my %drivers = DBI->installed_drivers(); + + my %drivers = DBI->installed_drivers(); DbLog_showChildHandles($name,$drivers{$_}, 0, $_) for (keys %drivers); - + InternalTimer(gettimeofday()+$iv, "DbLog_startShowChildhandles", "$name:$sub", 0) if($iv); return; } - + sub DbLog_showChildHandles { my ($name,$h, $level, $key) = @_; - + my $t = $h->{Type}."h"; $t = ($t=~/drh/)?"DriverHandle ":($t=~/dbh/)?"DatabaseHandle ":($t=~/sth/)?"StatementHandle":"Undefined"; Log3($name, 1, "DbLog $name - traceHandles (system wide) - Driver: ".$key.", ".$t.": ".("\t" x $level).$h); @@ -6802,22 +6807,23 @@ sub DbLog_showChildHandles { =item summary_DE loggt Events in eine Datenbank =begin html - +

DbLog

+
+
    -
    With DbLog events can be stored in a database. SQLite, MySQL/MariaDB and PostgreSQL are supported databases.

    - + Prerequisites

    - + The Perl-modules DBI and DBD::<dbtype> need to be installed (use cpan -i <module> - if your distribution does not have it). + if your distribution does not have it).

    - + On a debian based system you may install these modules for instance by:

    - +
      - +
      @@ -6827,33 +6833,33 @@ sub DbLog_showChildHandles {

      - + Preparations

      - + At first you need to install and setup the database. - The installation of database system itself is not described here, please refer to the installation instructions of your + The installation of database system itself is not described here, please refer to the installation instructions of your database.

      - + Note:
      - In case of fresh installed MySQL/MariaDB system don't forget deleting the anonymous "Everyone"-User with an admin-tool if - existing ! -

      - - Sample code and Scripts to prepare a MySQL/PostgreSQL/SQLite database you can find in - SVN -> contrib/dblog/db_create_<DBType>.sql.
      - (Caution: The local FHEM-Installation subdirectory ./contrib/dblog doesn't contain the freshest scripts !!) + In case of fresh installed MySQL/MariaDB system don't forget deleting the anonymous "Everyone"-User with an admin-tool if + existing !

      - + + Sample code and Scripts to prepare a MySQL/PostgreSQL/SQLite database you can find in + SVN -> contrib/dblog/db_create_<DBType>.sql.
      + (Caution: The local FHEM-Installation subdirectory ./contrib/dblog doesn't contain the freshest scripts !!) +

      + The database contains two tables: current and history.
      - The latter contains all events whereas the former only contains the last event for any given reading and device. - Please consider the attribute DbLogType implicitly to determine the usage of tables + The latter contains all events whereas the former only contains the last event for any given reading and device. + Please consider the DbLogType implicitly to determine the usage of tables current and history.

      - + The columns have the following meaning:

      - +
        -
      DBI : sudo apt-get install libdbi-perl
      MySQL : sudo apt-get install [mysql-server] mysql-client libdbd-mysql libdbd-mysql-perl (mysql-server only if you use a local MySQL-server installation)
      +
      @@ -6866,18 +6872,18 @@ sub DbLog_showChildHandles {

      - + create index
      Due to reading performance, e.g. on creation of SVG-plots, it is very important that the index "Search_Idx" - or a comparable index (e.g. a primary key) is applied. + or a comparable index (e.g. a primary key) is applied. A sample code for creation of that index is also available in mentioned scripts of - SVN -> contrib/dblog/db_create_<DBType>.sql. + SVN -> contrib/dblog/db_create_<DBType>.sql.

      - + The index "Search_Idx" can be created, e.g. in database 'fhem', by these statements (also subsequently):

      - +
        -
      TIMESTAMP : timestamp of event, e.g. 2007-12-30 21:45:22
      DEVICE : device name, e.g. Wetterstation
      +
      @@ -6885,124 +6891,126 @@ sub DbLog_showChildHandles {
      MySQL : CREATE INDEX Search_Idx ON `fhem`.`history` (DEVICE, READING, TIMESTAMP);
      SQLite : CREATE INDEX Search_Idx ON `history` (DEVICE, READING, TIMESTAMP);

    - - For the connection to the database a configuration file is used. + + For the connection to the database a configuration file is used. The configuration is stored in a separate file to avoid storing the password in the main configuration file and to have it visible in the output of the list command.

    - - The configuration file should be copied e.g. to /opt/fhem and has the following structure you have to customize + + The configuration file should be copied e.g. to /opt/fhem and has the following structure you have to customize suitable to your conditions (decomment the appropriate raws and adjust it):

    - +
         ####################################################################################
    -    # database configuration file     
    -    #   
    +    # database configuration file
    +    #
         # NOTE:
         # If you don't use a value for user / password please delete the leading hash mark
    -    # and write 'user => ""' respectively 'password => ""' instead !    
    +    # and write 'user => ""' respectively 'password => ""' instead !
         #
         #
    -    ## for MySQL                                                      
    +    ## for MySQL
         ####################################################################################
    -    #%dbconfig= (                                                    
    -    #    connection => "mysql:database=fhem;host=<database host>;port=3306",       
    -    #    user => "fhemuser",                                          
    +    #%dbconfig= (
    +    #    connection => "mysql:database=fhem;host=<database host>;port=3306",
    +    #    user => "fhemuser",
         #    password => "fhempassword",
    -    #    # optional enable(1) / disable(0) UTF-8 support 
    -    #    # (full UTF-8 support exists from DBD::mysql version 4.032, but installing 
    -    #    # 4.042 is highly suggested)   
    -    #    utf8 => 1   
    -    #);                                                              
    +    #    # optional enable(1) / disable(0) UTF-8 support
    +    #    # (full UTF-8 support exists from DBD::mysql version 4.032, but installing
    +    #    # 4.042 is highly suggested)
    +    #    utf8 => 1
    +    #);
         ####################################################################################
    -    #                                                                
    -    ## for PostgreSQL                                                
    +    #
    +    ## for PostgreSQL
         ####################################################################################
    -    #%dbconfig= (                                                   
    -    #    connection => "Pg:database=fhem;host=<database host>",        
    -    #    user => "fhemuser",                                     
    -    #    password => "fhempassword"                              
    -    #);                                                              
    +    #%dbconfig= (
    +    #    connection => "Pg:database=fhem;host=<database host>",
    +    #    user => "fhemuser",
    +    #    password => "fhempassword"
    +    #);
         ####################################################################################
    -    #                                                                
    -    ## for SQLite (username and password stay empty for SQLite)      
    +    #
    +    ## for SQLite (username and password stay empty for SQLite)
         ####################################################################################
    -    #%dbconfig= (                                                   
    -    #    connection => "SQLite:dbname=/opt/fhem/fhem.db",        
    -    #    user => "",                                             
    -    #    password => ""                                          
    -    #);                                                              
    +    #%dbconfig= (
    +    #    connection => "SQLite:dbname=/opt/fhem/fhem.db",
    +    #    user => "",
    +    #    password => ""
    +    #);
         ####################################################################################
         
    If configDB is used, the configuration file has to be uploaded into the configDB !

    - + Note about special characters:
    - If special characters, e.g. @,$ or % which have a meaning in the perl programming + If special characters, e.g. @,$ or % which have a meaning in the perl programming language are used in a password, these special characters have to be escaped. - That means in this example you have to use: \@,\$ respectively \%. + That means in this example you have to use: \@,\$ respectively \%.


    - - Define -
      + + + Define
      - - define <name> DbLog <configfilename> <regexp> -

      +
      + +
        + + define <name> DbLog <configfilename> <regexp>

        <configfilename> is the prepared configuration file.
        <regexp> is identical to the specification of regex in the FileLog definition.

        - + Example:
          define myDbLog DbLog /etc/fhem/db.conf .*:.*
          all events will stored into the database

        - + After you have defined your DbLog-device it is recommended to run the configuration check

          set <name> configCheck

        - - This check reports some important settings and gives recommendations back to you if proposals are indentified. + + This check reports some important settings and gives recommendations back to you if proposals are indentified.

        - - DbLog distinguishes between the synchronous (default) and asynchronous logmode. The logmode is adjustable by the - attribute asyncMode. Since version 2.13.5 DbLog is supporting primary key (PK) set in table - current or history. If you want use PostgreSQL with PK it has to be at lest version 9.5. + + DbLog distinguishes between the synchronous (default) and asynchronous logmode. The logmode is adjustable by the + asyncMode. Since version 2.13.5 DbLog is supporting primary key (PK) set in table + current or history. If you want use PostgreSQL with PK it has to be at lest version 9.5.

        - + The content of VALUE will be optimized for automated post-processing, e.g. yes is translated to 1

        - + The stored values can be retrieved by the following code like FileLog:
          get myDbLog - - 2012-11-10 2012-11-10 KS300:temperature::

        - + transfer FileLog-data to DbLog

        There is the special module 98_FileLogConvert.pm available to transfer filelog-data to the DbLog-database.
        The module can be downloaded here or from directory ./contrib instead. - Further information and help you can find in the corresponding + Further information and help you can find in the corresponding Forumthread .


        - + Reporting and Management of DbLog database content

        By using SVG database content can be visualized.
        - Beyond that the module DbRep can be used to prepare tabular - database reports or you can manage the database content with available functions of that module. + Beyond that the module DbRep can be used to prepare tabular + database reports or you can manage the database content with available functions of that module.


        - + Troubleshooting

        - If after successful definition the DbLog-device doesn't work as expected, the following notes may help: + If after successful definition the DbLog-device doesn't work as expected, the following notes may help:

        - +
        • Have the preparatory steps as described in commandref been done ? (install software components, create tables and index)
        • Was "set <name> configCheck" executed after definition and potential errors fixed or rather the hints implemented ?
        • @@ -7010,211 +7018,286 @@ sub DbLog_showChildHandles {
        • When creating a SVG-plot and no drop-down list with proposed values appear -> set attribute "DbLogType" to "Current/History".

        - + If the notes don't lead to success, please increase verbose level of the DbLog-device to 4 or 5 and observe entries in logfile relating to the DbLog-device. - - For problem analysis please post the output of "list <name>", the result of "set <name> configCheck" and the + + For problem analysis please post the output of "list <name>", the result of "set <name> configCheck" and the logfile entries of DbLog-device to the forum thread.

        - +

      + + + Set +

      - - - Set
        - set <name> addCacheLine YYYY-MM-DD HH:MM:SS|<device>|<type>|<event>|<reading>|<value>|[<unit>]

        -
          In asynchronous mode a new dataset is inserted to the Cache and will be processed at the next database sync cycle. +
        • set <name> addCacheLine YYYY-MM-DD HH:MM:SS|<device>|<type>|<event>|<reading>|<value>|[<unit>]

          + +
            + In asynchronous mode a new dataset is inserted to the Cache and will be processed at the next database sync cycle.

            - + Example:
            set <name> addCacheLine 2017-12-05 17:03:59|MaxBathRoom|MAX|valveposition: 95|valveposition|95|%
            -

          - - set <name> addLog <devspec>:<Reading> [Value] [CN=<caller name>] [!useExcludes]

          -
            Inserts an additional log entry of a device/reading combination into the database. Readings which are possibly specified - in attribute "DbLogExclude" (in source device) are not logged, unless they are enclosed in attribute "DbLogInclude" + + +
          +
          + +
        • set <name> addLog <devspec>:<Reading> [Value] [CN=<caller name>] [!useExcludes]

          + +
            + Inserts an additional log entry of a device/reading combination into the database. Readings which are possibly specified + in attribute "DbLogExclude" (in source device) are not logged, unless they are enclosed in attribute "DbLogInclude" or addLog was called with option "!useExcludes".

            - +
              -
            • <devspec>:<Reading> - The device can be declared by a device specification +
            • <devspec>:<Reading> - The device can be declared by a device specification (devspec). "Reading" will be evaluated as regular expression. If The reading isn't available and the value "Value" is specified, the - reading will be added to database as new one if it isn't a regular + reading will be added to database as new one if it isn't a regular expression and the readingname is valid.
            • -
            • Value - Optionally you can enter a "Value" that is used as reading value in the dataset. If the value isn't - specified (default), the current value of the specified reading will be inserted into the database.
            • -
            • CN=<caller name> - By the key "CN=" (Caller Name) you can specify an additional string, +
            • Value - Optionally you can enter a "Value" that is used as reading value in the dataset. If the value isn't + specified (default), the current value of the specified reading will be inserted into the database.
            • +
            • CN=<caller name> - By the key "CN=" (Caller Name) you can specify an additional string, e.g. the name of a calling device (for example an at- or notify-device). - Via the function defined in attribute "valueFn" this key can be analyzed + Via the function defined in valueFn this key can be analyzed by the variable $CN. Thereby it is possible to control the behavior of the addLog dependend from - the calling source.
            • -
            • !useExcludes - The function considers attribute "DbLogExclude" in the source device if it is set. If the optional + the calling source.
            • +
            • !useExcludes - The function considers attribute "DbLogExclude" in the source device if it is set. If the optional keyword "!useExcludes" is set, the attribute "DbLogExclude" isn't considered.

            - + The database field "EVENT" will be filled with the string "addLog" automatically.
            The addLog-command doesn't create an additional event in your system !

            - + Examples:
            set <name> addLog SMA_Energymeter:Bezug_Wirkleistung
            set <name> addLog TYPE=SSCam:state
            set <name> addLog MyWetter:(fc10.*|fc8.*)
            set <name> addLog MyWetter:(wind|wind_ch.*) 20 !useExcludes
            set <name> addLog TYPE=CUL_HM:FILTER=model=HM-CC-RT-DN:FILTER=subType!=(virtual|):(measured-temp|desired-temp|actuator)

            - + set <name> addLog USV:state CN=di.cronjob
            In the valueFn-function the caller "di.cronjob" is evaluated via the variable $CN and the timestamp is corrected:

            - valueFn = if($CN eq "di.cronjob" and $TIMESTAMP =~ m/\s00:00:[\d:]+/) { $TIMESTAMP =~ s/\s([^\s]+)/ 23:59:59/ } - -

          - - set <name> clearReadings

          -
            This function clears readings which were created by different DbLog-functions.

          + valueFn = if($CN eq "di.cronjob" and $TIMESTAMP =~ m/\s00:00:[\d:]+/) { $TIMESTAMP =~ s/\s([^\s]+)/ 23:59:59/ } - set <name> commitCache

          -
            In asynchronous mode (attribute asyncMode=1), the cached data in memory will be written into the database + +
          +
          + +
        • set <name> clearReadings

          +
            + This function clears readings which were created by different DbLog-functions. + +
          +
          + +
        • set <name> commitCache

          +
            + In asynchronous mode (asyncMode=1), the cached data in memory will be written into the database and subsequently the cache will be cleared. Thereby the internal timer for the asynchronous mode Modus will be set new. - The command can be usefull in case of you want to write the cached data manually or e.g. by an AT-device on a defined - point of time into the database.

          + The command can be useful in case you want to write the cached data manually or e.g. by an AT-device on a defined - point of time into the database. +
        • +
        +
        - set <name> configCheck

        -
          This command checks some important settings and give recommendations back to you if proposals are identified. -

        - - set <name> count

        -
          Count records in tables current and history and write results into readings countCurrent and countHistory.

        +
      • set <name> configCheck

        +
          + This command checks some important settings and give recommendations back to you if proposals are identified. + +
        +
        - set <name> countNbl

        -
          The non-blocking execution of "set <name> count".

        - - set <name> deleteOldDays <n>

        -
          Delete records from history older than <n> days. Number of deleted records will be written into reading +
        • set <name> count

          +
            + Count records in tables current and history and write results into readings countCurrent and countHistory. + +
          +
          + +
        • set <name> countNbl

          +
            + The non-blocking execution of "set <name> count". + +
          +
          + +
        • set <name> deleteOldDays <n>

          +
            + Delete records from history older than <n> days. Number of deleted records will be written into reading lastRowsDeleted. -

          - - set <name> deleteOldDaysNbl <n>

          -
            - Is identical to function "deleteOldDays" whereupon deleteOldDaysNbl will be executed non-blocking. + +
          +
          + +
        • set <name> deleteOldDaysNbl <n>

          +
            + Is identical to function "deleteOldDays" whereupon deleteOldDaysNbl will be executed non-blocking.

            - + Note:
            Even though the function itself is non-blocking, you have to set DbLog into the asynchronous mode (attr asyncMode = 1) to avoid a blocking situation of FHEM ! - -
          -
          - set <name> eraseReadings

          -
            This function deletes all readings except reading "state".

          +
        • +
        +
        - - set <name> exportCache [nopurge | purgecache]

        -
          If DbLog is operating in asynchronous mode, it's possible to export the cache content into a textfile. +
        • set <name> eraseReadings

          +
            + This function deletes all readings except reading "state". + +
          +
          + + +
        • set <name> exportCache [nopurge | purgecache]

          +
            + If DbLog is operating in asynchronous mode, it's possible to export the cache content into a textfile. The file will be written to the directory (global->modpath)/log/ by default setting. The destination directory can be - changed by the attribute expimpdir.
            + changed by the expimpdir.
            The filename will be generated automatically and is built by a prefix "cache_", followed by DbLog-devicename and the present timestamp, e.g. "cache_LogDB_2017-03-23_22-13-55".
            - There are two options possible, "nopurge" respectively "purgecache". The option determines whether the cache content + There are two options possible, "nopurge" respectively "purgecache". The option determines whether the cache content will be deleted after export or not. Using option "nopurge" (default) the cache content will be preserved.
            - The attribute "exportCacheAppend" defines, whether every export process creates a new export file - (default) or the cache content is appended to an existing (newest) export file. -

          - - set <name> importCachefile <file>

          -
            Imports a textfile into the database which has been written by the "exportCache" function. - The allocatable files will be searched in directory (global->modpath)/log/ by default and a drop-down list will be + The exportCacheAppend defines, whether every export process creates a new export file + (default) or the cache content is appended to an existing (newest) export file. + +
          +
          + +
        • set <name> importCachefile <file>

          +
            + Imports a textfile into the database which has been written by the "exportCache" function. + The allocatable files will be searched in directory (global->modpath)/log/ by default and a drop-down list will be generated from the files which are found in the directory. - The source directory can be changed by the attribute expimpdir.
            - Only that files will be shown which are correlate on pattern starting with "cache_", followed by the DbLog-devicename.
            + The source directory can be changed by the expimpdir.
            + Only that files will be shown which are correlate on pattern starting with "cache_", followed by the DbLog-devicename.
            For example a file with the name "cache_LogDB_2017-03-23_22-13-55", will match if Dblog-device has name "LogDB".
            - After the import has been successfully done, a prefix "impdone_" will be added at begin of the filename and this file + After the import has been successfully done, a prefix "impdone_" will be added at begin of the filename and this file doesn't appear on the drop-down list anymore.
            - If you want to import a cachefile from another source database, you may adapt the filename so it fits the search criteria - "DbLog-Device" in its name. After renaming the file appeares again on the drop-down list.

          + If you want to import a cachefile from another source database, you may adapt the filename so it fits the search criteria + "DbLog-Device" in its name. After renaming the file appears again on the drop-down list. +
        • +
        +
        - set <name> listCache

        -
          If DbLog is set to asynchronous mode (attribute asyncMode=1), you can use that command to list the events are cached in memory.

        +
      • set <name> listCache

        +
          + If DbLog is set to asynchronous mode (attribute asyncMode=1), you can use that command to list the events are + cached in memory. + +
        +
        - set <name> purgeCache

        -
          In asynchronous mode (attribute asyncMode=1), the in memory cached data will be deleted. - With this command data won't be written from cache into the database.

        - - set <name> reduceLog <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

        -
          Reduces records older than <no> days and (optional) newer than <nn> days to one record (the 1st) each hour per device & reading.
          - Within the device/reading name SQL-Wildcards "%" and "_" can be used.

          - - With the optional argument 'average' not only the records will be reduced, but all numerical values of an hour - will be reduced to a single average.
          - With the optional argument 'average=day' not only the records will be reduced, but all numerical values of a - day will be reduced to a single average. (implies 'average')

          - - You can optional set the last argument to "exclude=device1:reading1,device2:reading2,..." to exclude - device/readings from reduceLog.
          - Also you can optional set the last argument to "include=device:reading" to delimit the SELECT statement which - is executed on the database. This may reduce the system RAM load and increases the performance.

          - -
            - Example:
            - set <name> reduceLog 270 average include=Luftdaten_remote:%
            -
          -
          - - CAUTION: It is strongly recommended to check if the default INDEX 'Search_Idx' exists on the table 'history'!
          - The execution of this command may take (without INDEX) extremely long. FHEM will be blocked completely after issuing the command to completion !

          - -

        - - set <name> reduceLogNbl <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

        -
          Same function as "set <name> reduceLog" but FHEM won't be blocked due to this function is implemented +
        • set <name> purgeCache

          +
            + In asynchronous mode (asyncMode=1), the in memory cached data will be deleted. + With this command data won't be written from cache into the database. + +
          +
          + +
        • set <name> reduceLog <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

          +
            + Reduces records older than <no> days and (optional) newer than <nn> days to one record (the 1st) each hour per device & reading.
            + Within the device/reading name SQL-Wildcards "%" and "_" can be used.

            + + With the optional argument 'average' not only the records will be reduced, but all numerical values of an hour + will be reduced to a single average.
            + With the optional argument 'average=day' not only the records will be reduced, but all numerical values of a + day will be reduced to a single average. (implies 'average')

            + + You can optional set the last argument to "exclude=device1:reading1,device2:reading2,..." to exclude + device/readings from reduceLog.
            + Also you can optional set the last argument to "include=device:reading" to delimit the SELECT statement which + is executed on the database. This may reduce the system RAM load and increases the performance.

            + +
              + Example:
              + set <name> reduceLog 270 average include=Luftdaten_remote:%
              +
            +
            + + CAUTION: It is strongly recommended to check if the default INDEX 'Search_Idx' exists on the table 'history'!
            + The execution of this command may take (without INDEX) extremely long. FHEM will be blocked completely after issuing the command to completion !

            + + +
          +
          + +
        • set <name> reduceLogNbl <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

          +
            + Same function as "set <name> reduceLog" but FHEM won't be blocked due to this function is implemented non-blocking !

            - + Note:
            Even though the function itself is non-blocking, you have to set DbLog into the asynchronous mode (attr asyncMode = 1) to avoid a blocking situation of FHEM ! - -

          - set <name> reopen [n]

          -
            Perform a database disconnect and immediate reconnect to clear cache and flush journal file if no time [n] was set.
            - If optionally a delay time of [n] seconds was set, the database connection will be disconnect immediately but it was only reopened - after [n] seconds. In synchronous mode the events won't saved during that time. In asynchronous mode the events will be - stored in the memory cache and saved into database after the reconnect was done.

          +
        • +
        +
        - set <name> rereadcfg

        -
          Perform a database disconnect and immediate reconnect to clear cache and flush journal file.
        - Probably same behavior as reopen, but rereadcfg will read the configuration data before reconnect.

        - - set <name> userCommand <validSqlStatement>

        +
      • set <name> reopen [n]

          - Performs simple sql select statements on the connected database. Usercommand and result will be written into - corresponding readings.
          - The result can only be a single line. - The execution of SQL-Statements in DbLog is outdated. Therefore the analysis module - DbRep should be used.
          -

        + Perform a database disconnect and immediate reconnect to clear cache and flush journal file if no time [n] was set.
        + If optionally a delay time of [n] seconds was set, the database connection will be disconnect immediately but it was only reopened + after [n] seconds. In synchronous mode the events won't saved during that time. In asynchronous mode the events will be + stored in the memory cache and saved into database after the reconnect was done. +
      • +
      +
      -

    +
  • set <name> rereadcfg

    +
      + Perform a database disconnect and immediate reconnect to clear cache and flush journal file.
      + Probably same behavior as reopen, but rereadcfg will read the configuration data before reconnect. + +
    +
    + +
  • set <name> userCommand <validSqlStatement>

    +
      + Performs simple sql select statements on the connected database. Usercommand and result will be written into + corresponding readings.
      + The result can only be a single line. + The execution of SQL-Statements in DbLog is outdated. Therefore the analysis module + DbRep should be used.
      + +
    +
    - - Get -
      - get <name> ReadingsVal       <device> <reading> <default>
      - get <name> ReadingsTimestamp <device> <reading> <default>
      -
      - Retrieve one single value, use and syntax are similar to ReadingsVal() and ReadingsTimestamp() functions.
    -
    -
    +
    + + + Get +
    +
    + +
      +
    • get <name> ReadingsVal <device> <reading> <default>
    • +
    • get <name> ReadingsTimestamp <device> <reading> <default>

      + + Retrieve one single value, use and syntax are similar to ReadingsVal() and ReadingsTimestamp() functions. + +
    • +
    +
    +
      - get <name> <infile> <outfile> <from> - <to> <column_spec> -

      +
    • get <name> <infile> <outfile> <from> + <to> <column_spec>

      + Read data from the Database, used by frontends to plot data without direct access to the Database.
      @@ -7225,7 +7308,7 @@ sub DbLog_showChildHandles {
    • current: reading actual readings from table "current"
    • history: reading history readings from table "history"
    • -: identical to "history"
    • -
    +
  • <out>
    A dummy parameter for FileLog compatibility. Setting by default to - @@ -7295,7 +7378,7 @@ sub DbLog_showChildHandles {
    • get myDbLog - - 2012-11-10 2012-11-20 KS300:temperature
    • get myDbLog current ALL - - %:temperature

    • - you will get all actual readings "temperature" from all logged devices. + you will get all actual readings "temperature" from all logged devices. Be careful by using "history" as inputfile because a long execution time will be expected!
    • get myDbLog - - 2012-11-10_10 2012-11-10_20 KS300:temperature::int1
      like from 10am until 08pm at 10.11.2012
    • @@ -7308,18 +7391,19 @@ sub DbLog_showChildHandles { and output for port B is like this: 2012-11-20_10:23:54 66.647
    • get DbLog - - 2013-05-26 2013-05-28 Pumpe:data::delta-ts:$val=~s/on/hide/
      Setting up a "Counter of Uptime". The function delta-ts gets the seconds between the last and the - actual logentry. The keyword "hide" will hide the logentry of "on" because this time + actual logentry. The keyword "hide" will hide the logentry of "on" because this time is a "counter of Downtime"
    -

    - +
  • + +
    Get when used for webcharts
      - get <name> <infile> <outfile> <from> - <to> <device> <querytype> <xaxis> <yaxis> <savename> -

      +
    • get <name> <infile> <outfile> <from> + <to> <device> <querytype> <xaxis> <yaxis> <savename>

      + Query the Database to retrieve JSON-Formatted Data, which is used by the charting frontend.
      @@ -7378,48 +7462,51 @@ sub DbLog_showChildHandles { Retrieves charting data, which requires a given xaxis, yaxis, device, to and from
      Will ouput a JSON like this: [{'TIMESTAMP':'2013-02-11 00:10:10','VALUE':'0.22431388090756'},{'TIMESTAMP'.....}]
    • get logdb - webchart 2013-02-11_00:00:00 2013-02-12_00:00:00 ESA2000_LED_011e savechart TIMESTAMP day_kwh tageskwh
      - Will save a chart in the database with the given name and the chart configuration parameters
    • + Will save a chart in the database with the given name and the chart configuration parameters
    • get logdb - webchart "" "" "" deletechart "" "" 7
      Will delete a chart from the database with the given id
    -

    - - - - Attributes -

    - + + +
    + + + Attributes +
    +
    +
      - +
    • addStateEvent
        attr <device> addStateEvent [0|1] -
        +

        + As you probably know the event associated with the state Reading is special, as the "state: " string is stripped, i.e event is not "state: on" but just "on".
        Mostly it is desireable to get the complete event without "state: " stripped, so it is the default behavior of DbLog. That means you will get state-event complete as "state: xxx".
        In some circumstances, e.g. older or special modules, it is a good idea to set addStateEvent to "0". - Try it if you have trouble with the default adjustment. + Try it if you have trouble with the default adjustment.

    - +
      - +
    • asyncMode
        attr <device> asyncMode [1|0] -
        - - This attribute determines the operation mode of DbLog. If asynchronous mode is active (asyncMode=1), the events which should be saved - at first will be cached in memory. After synchronisation time cycle (attribute syncInterval), or if the count limit of datasets in cache +

        + + This attribute determines the operation mode of DbLog. If asynchronous mode is active (asyncMode=1), the events which should be saved + at first will be cached in memory. After synchronisation time cycle (attribute syncInterval), or if the count limit of datasets in cache is reached (attribute cacheLimit), the cached events get saved into the database using bulk insert. - If the database isn't available, the events will be cached in memory furthermore, and tried to save into database again after + If the database isn't available, the events will be cached in memory furthermore, and tried to save into database again after the next synchronisation time cycle if the database is available.
        - In asynchronous mode the data insert into database will be executed non-blocking by a background process. + In asynchronous mode the data insert into database will be executed non-blocking by a background process. You can adjust the timeout value for this background process by attribute "timeout" (default 86400s).
        In synchronous mode (normal mode) the events won't be cached im memory and get saved into database immediately. If the database isn't available the events are get lost.
        @@ -7427,35 +7514,35 @@ sub DbLog_showChildHandles {

      - +
        - +
      • bulkInsert
          attr <device> bulkInsert [1|0] -
          - - Toggles the Insert mode between Array (default) and Bulk. This Bulk insert mode increase the write performance +

          + + Toggles the Insert mode between Array (default) and Bulk. This Bulk insert mode increase the write performance into the history table significant in case of plenty of data to insert, especially if asynchronous mode is - used. + used. To get the whole improved performance, the attribute "DbLogType" should not contain the current table in this use case.

      - +
        - +
      • commitMode
          attr <device> commitMode [basic_ta:on | basic_ta:off | ac:on_ta:on | ac:on_ta:off | ac:off_ta:on] -
          - +

          + Change the usage of database autocommit- and/or transaction- behavior.
          - If transaction "off" is used, not saved datasets are not returned to cache in asynchronous mode.
          + If transaction "off" is used, not saved datasets are not returned to cache in asynchronous mode.
          This attribute is an advanced feature and should only be used in a concrete situation or support case.

          - +
          • basic_ta:on - autocommit server basic setting / transaktion on (default)
          • basic_ta:off - autocommit server basic setting / transaktion off
          • @@ -7463,78 +7550,79 @@ sub DbLog_showChildHandles {
          • ac:on_ta:off - autocommit on / transaktion off
          • ac:off_ta:on - autocommit off / transaktion on (autocommit "off" set transaktion "on" implicitly)
          - +

        - +
      • cacheEvents
          attr <device> cacheEvents [2|1|0] -
          +

          +
          • cacheEvents=1: creates events of reading CacheUsage at point of time when a new dataset has been added to the cache.
          • -
          • cacheEvents=2: creates events of reading CacheUsage at point of time when in aychronous mode a new write cycle to the - database starts. In that moment CacheUsage contains the number of datasets which will be written to +
          • cacheEvents=2: creates events of reading CacheUsage at point of time when in aychronous mode a new write cycle to the + database starts. In that moment CacheUsage contains the number of datasets which will be written to the database.


      - +
        - +
      • cacheLimit
          - attr <device> cacheLimit <n> -
          - + attr <device> cacheLimit <n> +

          + In asynchronous logging mode the content of cache will be written into the database and cleared if the number <n> datasets - in cache has reached (default: 500). Thereby the timer of asynchronous logging mode will be set new to the value of + in cache has reached (default: 500). Thereby the timer of asynchronous logging mode will be set new to the value of attribute "syncInterval". In case of error the next write attempt will be started at the earliest after syncInterval/2.

      - +
        - +
      • cacheOverflowThreshold
          - attr <device> cacheOverflowThreshold <n> -
          - - In asynchronous log mode, sets the threshold of <n> records above which the cache contents are exported to a file + attr <device> cacheOverflowThreshold <n> +

          + + In asynchronous log mode, sets the threshold of <n> records above which the cache contents are exported to a file instead of writing the data to the database.
          - The function corresponds to the "exportCache purgecache" set command and uses its settings.
          - With this attribute an overload of the server memory can be prevented if the database is not available for a longer period of time. - time (e.g. in case of error or maintenance). If the attribute value is smaller or equal to the value of the + The function corresponds to the "exportCache purgecache" set command and uses its settings.
          + With this attribute an overload of the server memory can be prevented if the database is not available for a longer period of time. + time (e.g. in case of error or maintenance). If the attribute value is smaller or equal to the value of the attribute "cacheLimit", the value of "cacheLimit" is used for "cacheOverflowThreshold".
          In this case, the cache will always be written to a file instead of to the database if the threshold value has been reached.
          - Thus, the data can be specifically written to one or more files with this setting, in order to import them into the + Thus, the data can be specifically written to one or more files with this setting, in order to import them into the database at a later time with the set command "importCachefile".

      - +
        - +
      • colEvent
          - attr <device> colEvent <n> -
          - + attr <device> colEvent <n> +

          + The field length of database field EVENT will be adjusted. By this attribute the default value in the DbLog-device can be - adjusted if the field length in the database was changed manually. If colEvent=0 is set, the database field + adjusted if the field length in the database was changed manually. If colEvent=0 is set, the database field EVENT won't be filled .
          Note:
          If the attribute is set, all of the field length limits are valid also for SQLite databases as noticed in Internal COLUMNS !
          @@ -7542,17 +7630,17 @@ sub DbLog_showChildHandles {

        - +
          - +
        • colReading
            - attr <device> colReading <n> -
            - + attr <device> colReading <n> +

            + The field length of database field READING will be adjusted. By this attribute the default value in the DbLog-device can be - adjusted if the field length in the database was changed manually. If colReading=0 is set, the database field + adjusted if the field length in the database was changed manually. If colReading=0 is set, the database field READING won't be filled .
            Note:
            If the attribute is set, all of the field length limits are valid also for SQLite databases as noticed in Internal COLUMNS !
            @@ -7560,17 +7648,17 @@ sub DbLog_showChildHandles {

          - +
            - +
          • colValue
              - attr <device> colValue <n> -
              - + attr <device> colValue <n> +

              + The field length of database field VALUE will be adjusted. By this attribute the default value in the DbLog-device can be - adjusted if the field length in the database was changed manually. If colEvent=0 is set, the database field + adjusted if the field length in the database was changed manually. If colEvent=0 is set, the database field VALUE won't be filled .
              Note:
              If the attribute is set, all of the field length limits are valid also for SQLite databases as noticed in Internal COLUMNS !
              @@ -7578,65 +7666,76 @@ sub DbLog_showChildHandles {

            - +
              - +
            • DbLogType
                attr <device> DbLogType [Current|History|Current/History] -
                - - This attribute determines which table or which tables in the database are wanted to use. If the attribute isn't set, +

                + + This attribute determines which table or which tables in the database are wanted to use. If the attribute isn't set, the adjustment history will be used as default.
                - - + + The meaning of the adjustments in detail are:

                - +
                  - +
                  - - -
                  Current Events are only logged into the current-table. +
                  Current Events are only logged into the current-table. The entries of current-table will evaluated with SVG-creation.
                  History Events are only logged into the history-table. No dropdown list with proposals will created with the +
                  History Events are only logged into the history-table. No dropdown list with proposals will created with the SVG-creation.
                  Current/History Events will be logged both the current- and the history-table. +
                  Current/History Events will be logged both the current- and the history-table. The entries of current-table will evaluated with SVG-creation.
                  SampleFill/History Events are only logged into the history-table. The entries of current-table will evaluated with SVG-creation - and can be filled up with a customizable extract of the history-table by using a + and can be filled up with a customizable extract of the history-table by using a DbRep-device command "set <DbRep-name> tableCurrentFillup" (advanced feature).


                - + Note:
                The current-table has to be used to get a Device:Reading-DropDown list when a SVG-Plot will be created.

            - +
              - +
            • DbLogSelectionMode
                attr <device> DbLogSelectionMode [Exclude|Include|Exclude/Include] -
                - - Thise DbLog-Device-Attribute specifies how the device specific Attributes DbLogExclude and DbLogInclude are handled. - If this Attribute is missing it defaults to "Exclude".

                -
                  -
                • Exclude: DbLog behaves just as usual. This means everything specified in the regex in DEF will be logged by default and anything excluded - via the DbLogExclude attribute will not be logged
                • -
                • Include: Nothing will be logged, except the readings specified via regex in the DbLogInclude attribute - (in source devices). - Neither the Regex set in DEF will be considered nor the device name of the source device itself.
                • -
                • Exclude/Include: Just almost the same as Exclude, but if the reading matches the DbLogExclude attribute, then - it will further be checked against the regex in DbLogInclude which may possibly re-include the already - excluded reading.
                • +

                  + + This attribute, specific to DbLog devices, influences how the device-specific attributes + DbLogExclude and DbLogInclude + are evaluated. DbLogExclude and DbLogInclude are set in the source devices.
                  + If the DbLogSelectionMode attribute is not set, "Exclude" is the default. +

                  + +
                    +
                  • Exclude: Readings are logged if they match the regex specified in the DEF. Excluded are + the readings that match the regex in the DbLogExclude attribute.
                    + The DbLogInclude attribute is not considered in this case. +
                  • +
                    +
                  • Include: Only readings that are included via the regex in the attribute DbLogInclude + are logged.
                    + The DbLogExclude attribute is not considered in this case, nor is the regex in DEF. +
                  • +
                    +
                  • Exclude/Include: Works basically like "Exclude", except that both the attribute DbLogExclude + attribute and the DbLogInclude attribute are checked. + Readings that were excluded by DbLogExclude, but are included by DbLogInclude + are therefore still included in the logging. +
                @@ -7644,22 +7743,44 @@ sub DbLog_showChildHandles {
                  - +
                • DbLogInclude
                    - attr <device> DbLogInclude regex:MinInterval[:force],[regex:MinInterval[:force]] ... -
                    - - A new Attribute DbLogInclude will be propagated to all Devices if DBLog is used. - DbLogInclude works just like DbLogExclude but to include matching readings. - If a MinInterval is set, the logentry is dropped if the defined interval is not reached and the value vs. - last value is equal. If the optional parameter "force" is set, the logentry is also dropped even though the value is not - equal the last one and the defined interval is not reached.
                    - See also the attributes defaultMinInterval and DbLogSelectionMode of DbLog device which takes influence on - how DbLogExclude and DbLogInclude are handled. + attr DbLogInclude Regex[:MinInterval][:force],[Regex[:MinInterval][:force]], ... +

                    + + The DbLogInclude attribute defines the readings to be stored in the database.
                    + The definition of the readings to be stored is done by a regular expression and all readings that match the regular + expression are stored in the database.
                    + + The optional <MinInterval> addition specifies that a value is saved when at least <MinInterval> + seconds have passed since the last save.
                    + + Regardless of the expiration of the interval, the reading is saved if the value of the reading has changed.
                    + With the optional modifier "force" the specified interval <MinInterval> can be forced to be kept even + if the value of the reading has changed since the last storage.

                    - + +
                      +
                      +        | Modifier |            within interval           | outside interval |
                      +        |          | Value equal        | Value changed   |                  |
                      +        |----------+--------------------+-----------------+------------------|
                      +        | <none>   | ignore             | store           | store            |
                      +        | force    | ignore             | ignore          | store            |
                      +      
                      +
                    + +
                    + Notes:
                    + The DbLogInclude attribute is propagated in all devices when DbLog is used.
                    + The DbLogSelectionMode attribute must be set accordingly + to enable DbLogInclude.
                    + With the defaultMinInterval attribute a default for + <MinInterval> can be specified. +

                    + Example
                    attr MyDevice1 DbLogInclude .*
                    attr MyDevice2 DbLogInclude state,(floorplantext|MyUserReading):300,battery:3600
                    @@ -7668,25 +7789,49 @@ sub DbLog_showChildHandles {

                  - +
                    - +
                  • DbLogExclude
                      - attr <device> DbLogExclude regex:MinInterval[:force],[regex:MinInterval[:force]] ... -
                      - - A new attribute DbLogExclude will be propagated to all devices if DBLog is used. - DbLogExclude will work as regexp to exclude defined readings to log. Each individual regexp-group are separated by - comma. - If a MinInterval is set, the logentry is dropped if the defined interval is not reached and the value vs. - lastvalue is equal. If the optional parameter "force" is set, the logentry is also dropped even though the value is not - equal the last one and the defined interval is not reached.
                      - See also the attributes defaultMinInterval and DbLogSelectionMode of DbLog device which takes influence on - how DbLogExclude and DbLogInclude are handled. + attr <device> DbLogExclude regex[:MinInterval][:force],[regex[:MinInterval][:force]] ... +

                      + + The DbLogExclude attribute defines the readings that should not be stored in the database.
                      + + The definition of the readings to be excluded is done via a regular expression and all readings matching the + regular expression are excluded from logging to the database.
                      + + Readings that have not been excluded via the regex are logged in the database. The behavior of the + storage is controlled with the following optional specifications.
                      + The optional <MinInterval> addition specifies that a value is saved when at least <MinInterval> + seconds have passed since the last storage.
                      + + Regardless of the expiration of the interval, the reading is saved if the value of the reading has changed.
                      + With the optional modifier "force" the specified interval <MinInterval> can be forced to be kept even + if the value of the reading has changed since the last storage.

                      - + +
                        +
                        +        | Modifier |            within interval           | outside interval |
                        +        |          | Value equal        | Value changed   |                  |
                        +        |----------+--------------------+-----------------+------------------|
                        +        | <none>   | ignore             | store           | store            |
                        +        | force    | ignore             | ignore          | store            |
                        +      
                        +
                      + +
                      + Notes:
                      + The DbLogExclude attribute is propagated in all devices when DbLog is used.
                      + The DbLogSelectionMode attribute can be set appropriately + to disable DbLogExclude.
                      + With the defaultMinInterval attribute a default for + <MinInterval> can be specified. +

                      + Example
                      attr MyDevice1 DbLogExclude .*
                      attr MyDevice2 DbLogExclude state,(floorplantext|MyUserReading):300,battery:3600
                      @@ -7695,33 +7840,33 @@ sub DbLog_showChildHandles {

                    - +
                      - +
                    • DbLogValueFn
                        attr <device> DbLogValueFn {} -
                        - - The attribute DbLogValueFn will be propagated to all devices if DbLog is used. - This attribute contains a Perl expression that can use and change values of $TIMESTAMP, $READING, $VALUE (value of +

                        + + The attribute DbLogValueFn will be propagated to all devices if DbLog is used. + This attribute contains a Perl expression that can use and change values of $TIMESTAMP, $READING, $VALUE (value of reading) and $UNIT (unit of reading value). That means the changed values are logged.
                        - Furthermore you have readonly access to $DEVICE (the source device name), $EVENT, $LASTTIMESTAMP and $LASTVALUE + Furthermore you have readonly access to $DEVICE (the source device name), $EVENT, $LASTTIMESTAMP and $LASTVALUE for evaluation in your expression. The variables $LASTTIMESTAMP and $LASTVALUE contain time and value of the last logged dataset of $DEVICE / $READING.
                        - If the $TIMESTAMP is to be changed, it must meet the condition "yyyy-mm-dd hh:mm:ss", otherwise the $timestamp wouldn't + If the $TIMESTAMP is to be changed, it must meet the condition "yyyy-mm-dd hh:mm:ss", otherwise the $timestamp wouldn't be changed. In addition you can set the variable $IGNORE=1 if you want skip a dataset from logging.
                        - The device specific function in "DbLogValueFn" is applied to the dataset before the potential existing attribute + The device specific function in "DbLogValueFn" is applied to the dataset before the potential existing attribute "valueFn" in the DbLog device.

                        - + Example
                         attr SMA_Energymeter DbLogValueFn
                        -{ 
                        +{
                           if ($READING eq "Bezug_WirkP_Kosten_Diff"){
                             $UNIT="Diff-W";
                           }
                        @@ -7733,35 +7878,35 @@ attr SMA_Energymeter DbLogValueFn
                              
                    - +
                      - +
                    • dbSchema
                        attr <device> dbSchema <schema> -
                        - - This attribute is available for database types MySQL/MariaDB and PostgreSQL. The table names (current/history) are +

                        + + This attribute is available for database types MySQL/MariaDB and PostgreSQL. The table names (current/history) are extended by its database schema. It is an advanced feature and normally not necessary to set.

                    - +
                      - +
                    • defaultMinInterval
                        attr <device> defaultMinInterval <devspec>::<MinInterval>[::force],[<devspec>::<MinInterval>[::force]] ... -
                        - +

                        + With this attribute a default minimum interval for devspec is defined. - If a defaultMinInterval is set, the logentry is dropped if the defined interval is not reached and the value vs. + If a defaultMinInterval is set, the logentry is dropped if the defined interval is not reached and the value vs. lastvalue is equal.
                        - If the optional parameter "force" is set, the logentry is also dropped even though the value is not + If the optional parameter "force" is set, the logentry is also dropped even though the value is not equal the last one and the defined interval is not reached.
                        Potential set DbLogExclude / DbLogInclude specifications in source devices are having priority over defaultMinInterval and are not overwritten by this attribute.
                        @@ -7769,25 +7914,25 @@ attr SMA_Energymeter DbLogValueFn Examples
                        attr dblog defaultMinInterval .*::120::force
                        - # Events of all devices are logged only in case of 120 seconds are elapsed to the last log entry (reading specific) independent of a possible value change.
                        + # Events of all devices are logged only in case of 120 seconds are elapsed to the last log entry (reading specific) independent of a possible value change.
                        attr dblog defaultMinInterval (Weather|SMA)::300
                        - # Events of devices "Weather" and "SMA" are logged only in case of 300 seconds are elapsed to the last log entry (reading specific) and the value is equal to the last logged value.
                        + # Events of devices "Weather" and "SMA" are logged only in case of 300 seconds are elapsed to the last log entry (reading specific) and the value is equal to the last logged value.
                        attr dblog defaultMinInterval TYPE=CUL_HM::600::force
                        # Events of all devices of Type "CUL_HM" are logged only in case of 600 seconds are elapsed to the last log entry (reading specific) independent of a possible value change.

                    - +
                      - +
                    • disable
                        attr <device> disable [0|1] -
                        - - Disables the DbLog device (1) or enables it (0). +

                        + + Disables the DbLog device (1) or enables it (0).
                    • @@ -7795,21 +7940,21 @@ attr SMA_Energymeter DbLogValueFn
                        - +
                      • excludeDevs
                          - attr <device> excludeDevs <devspec1>[#Reading],<devspec2>[#Reading],<devspec...> -
                          - - The device/reading-combinations "devspec1#Reading", "devspec2#Reading" up to "devspec.." are globally excluded from + attr <device> excludeDevs <devspec1>[#Reading],<devspec2>[#Reading],<devspec...> +

                          + + The device/reading-combinations "devspec1#Reading", "devspec2#Reading" up to "devspec.." are globally excluded from logging into the database.
                          - The specification of a reading is optional.
                          + The specification of a reading is optional.
                          Thereby devices are explicit and consequently excluded from logging without consideration of another excludes or - includes (e.g. in DEF). - The devices to exclude can be specified as device-specification. + includes (e.g. in DEF). + The devices to exclude can be specified as device-specification.

                          - + Examples
                          attr <device> excludeDevs global,Log.*,Cam.*,TYPE=DbLog @@ -7827,20 +7972,21 @@ attr SMA_Energymeter DbLogValueFn

                      • - +
                          - +
                        • expimpdir
                            - attr <device> expimpdir <directory> -
                            - - If the cache content will be exported by "exportCache" or the "importCachefile" - command, the file will be written into or read from that directory. The default directory is - "(global->modpath)/log/". - Make sure the specified directory is existing and writable.

                            - + attr <device> expimpdir <directory> +


                            + + If the cache content will be exported by exportCache command, + the file will be written into or read from that directory. The default directory is + "(global->modpath)/log/". + Make sure the specified directory is existing and writable. +

                            + Example
                            attr <device> expimpdir /opt/fhem/cache/ @@ -7849,15 +7995,15 @@ attr SMA_Energymeter DbLogValueFn

                          - +
                            - +
                          • exportCacheAppend
                              attr <device> exportCacheAppend [1|0] -
                              - +


                              + If set, the export of cache ("set <device> exportCache") appends the content to the newest available export file. If there is no existing export file, a new one will be created.
                              If the attribute is not set, every export process creates a new export file. (default)
                              @@ -7865,42 +8011,42 @@ attr SMA_Energymeter DbLogValueFn

                            - +
                              - +
                            • noNotifyDev
                                attr <device> noNotifyDev [1|0] -
                                - +

                                + Enforces that NOTIFYDEV won't be set and hence won't be used.

                            - +
                              - +
                            • noSupportPK
                                attr <device> noSupportPK [1|0] -
                                - +

                                + Deactivates the support of a set primary key by the module.

                            - +
                              - +
                            • SQLiteCacheSize
                                attr <device> SQLiteCacheSize <number of memory pages used for caching> -
                                +

                                The default is about 4MB of RAM to use for caching (page_size=1024bytes, cache_size=4000).
                                Embedded devices with scarce amount of RAM can go with 1000 pages or less. This will impact @@ -7912,12 +8058,12 @@ attr SMA_Energymeter DbLogValueFn
                                  - +
                                • SQLiteJournalMode
                                    attr <device> SQLiteJournalMode [WAL|off] -
                                    +

                                    Determines how SQLite databases are opened. Generally the Write-Ahead-Log (WAL) is the best choice for robustness and data integrity.
                                    @@ -7931,27 +8077,27 @@ attr SMA_Energymeter DbLogValueFn
                                      - +
                                    • syncEvents
                                        attr <device> syncEvents [1|0] -
                                        - +

                                        + events of reading syncEvents will be created.

                                    - +
                                      - +
                                    • showproctime
                                        attr <device> [1|0] -
                                        - - If set, the reading "sql_processing_time" shows the required execution time (in seconds) for the sql-requests. This is not calculated - for a single sql-statement, but the summary of all sql-statements necessary for within an executed DbLog-function in background. +

                                        + + If set, the reading "sql_processing_time" shows the required execution time (in seconds) for the sql-requests. This is not calculated + for a single sql-statement, but the summary of all sql-statements necessary for within an executed DbLog-function in background. The reading "background_processing_time" shows the total time used in background.
                                    • @@ -7959,90 +8105,91 @@ attr SMA_Energymeter DbLogValueFn
                                        - +
                                      • showNotifyTime
                                          attr <device> showNotifyTime [1|0] -
                                          - - If set, the reading "notify_processing_time" shows the required execution time (in seconds) in the DbLog +

                                          + + If set, the reading "notify_processing_time" shows the required execution time (in seconds) in the DbLog Notify-function. This attribute is practical for performance analyses and helps to determine the differences of time required when the operation mode was switched from synchronous to the asynchronous mode.
                                          - +

                                      - +
                                        - +
                                      • syncInterval
                                          attr <device> syncInterval <n> -
                                          - +

                                          + If DbLog is set to asynchronous operation mode (attribute asyncMode=1), with this attribute you can set up the interval in seconds used for storing the in-memory cached events into the database. The default value is 30 seconds.
                                          - +

                                      - +
                                        - +
                                      • suppressAddLogV3
                                          attr <device> suppressAddLogV3 [1|0] -
                                          - +

                                          + If set, verbose 3 Logfileentries done by the addLog-function will be suppressed.

                                      - +
                                        - +
                                      • suppressUndef
                                          attr <device> suppressUndef -
                                          - suppresses all undef values when returning data from the DB via get

                                          +

                                          - Example
                                          - #DbLog eMeter:power:::$val=($val>1500)?undef:$val + Suppresses all undef values when returning data from the DB via get. +

                                        - +
                                      • timeout
                                          attr <device> timeout <n> -
                                          +

                                          + Sets the timeout of the write cycle into the database in asynchronous mode (default 86400s)

                                      - +
                                        - +
                                      • traceFlag
                                          attr <device> traceFlag <ALL|SQL|CON|ENC|DBD|TXN> -
                                          - Trace flags are used to enable tracing of specific activities within the DBI and drivers. The attribute is only used for +

                                          + + Trace flags are used to enable tracing of specific activities within the DBI and drivers. The attribute is only used for tracing of errors in case of support.

                                          - +
                                            - +
                                            @@ -8054,40 +8201,41 @@ attr SMA_Energymeter DbLogValueFn
                                            ALL turn on all DBI and driver flags
                                            SQL trace SQL statements executed (Default)

                                          - +

                                      - +
                                        - +
                                      • traceHandles
                                          attr <device> traceHandles <n> -
                                          - +

                                          + If set, every <n> seconds the system wide existing database handles are printed out into the logfile. This attribute is only relevant in case of support. (default: 0 = switch off)
                                          - +

                                      - +
                                        - +
                                      • traceLevel
                                          attr <device> traceLevel <0|1|2|3|4|5|6|7> -
                                          +

                                          + Switch on the tracing function of the module.
                                          - Caution ! The attribute is only used for tracing errors or in case of support. If switched on very much entries + Caution ! The attribute is only used for tracing errors or in case of support. If switched on very much entries will be written into the FHEM Logfile !

                                          - +
                                            - +
                                            @@ -8100,43 +8248,44 @@ attr SMA_Energymeter DbLogValueFn
                                            0 Trace disabled. (Default)
                                            1 Trace top-level DBI method calls returning with results or errors.

                                          - +

                                      - +
                                        - +
                                      • useCharfilter
                                          attr <device> useCharfilter [0|1] -
                                          - If set, only ASCII characters from 32 to 126 are accepted in event. +

                                          + + If set, only ASCII characters from 32 to 126 are accepted in event. That are the characters " A-Za-z0-9!"#$%&'()*+,-.\/:;<=>?@[\\]^_`{|}~" .
                                          Umlauts and "€" are transcribed (e.g. ä to ae). (default: 0).

                                      - +
                                        - +
                                      • valueFn
                                          attr <device> valueFn {} -
                                          - - The attribute contains a Perl expression that can use and change values of $TIMESTAMP, $DEVICE, $DEVICETYPE, $READING, +

                                          + + The attribute contains a Perl expression that can use and change values of $TIMESTAMP, $DEVICE, $DEVICETYPE, $READING, $VALUE (value of reading) and $UNIT (unit of reading value).
                                          Furthermore you have readonly access to $EVENT, $LASTTIMESTAMP and $LASTVALUE for evaluation in your expression. The variables $LASTTIMESTAMP and $LASTVALUE contain time and value of the last logged dataset of $DEVICE / $READING.
                                          - If $TIMESTAMP is to be changed, it must meet the condition "yyyy-mm-dd hh:mm:ss", otherwise the $timestamp wouldn't + If $TIMESTAMP is to be changed, it must meet the condition "yyyy-mm-dd hh:mm:ss", otherwise the $timestamp wouldn't be changed. In addition you can set the variable $IGNORE=1 if you want skip a dataset from logging.

                                          - + Examples
                                          attr <device> valueFn {if ($DEVICE eq "living_Clima" && $VALUE eq "off" ){$VALUE=0;} elsif ($DEVICE eq "e-power"){$VALUE= sprintf "%.1f", $VALUE;}} @@ -8154,19 +8303,19 @@ attr SMA_Energymeter DbLogValueFn

                                        - +
                                          - +
                                        • verbose4Devs
                                            - attr <device> verbose4Devs <device1>,<device2>,<device..> -
                                            - + attr <device> verbose4Devs <device1>,<device2>,<device..> +


                                            + If verbose level 4 is used, only output of devices set in this attribute will be reported in FHEM central logfile. If this attribute isn't set, output of all relevant devices will be reported if using verbose level 4. The given devices are evaluated as Regex.

                                            - + Example
                                            attr <device> verbose4Devs sys.*,.*5000.*,Cam.*,global @@ -8183,22 +8332,23 @@ attr SMA_Energymeter DbLogValueFn =end html =begin html_DE - +

                                            DbLog

                                            +
                                            +
                                              -
                                              Mit DbLog werden Events in einer Datenbank gespeichert. Es wird SQLite, MySQL/MariaDB und PostgreSQL unterstützt.

                                              - + Voraussetzungen

                                              - + Die Perl-Module DBI und DBD::<dbtype> müssen installiert werden (use cpan -i <module> - falls die eigene Distribution diese nicht schon mitbringt). + falls die eigene Distribution diese nicht schon mitbringt).

                                              - + Auf einem Debian-System können diese Module z.Bsp. installiert werden mit:

                                              - +
                                                - +
                                                @@ -8208,35 +8358,35 @@ attr SMA_Energymeter DbLogValueFn

                                                - + Vorbereitungen

                                                - + Zunächst muss die Datenbank installiert und angelegt werden. - Die Installation des Datenbanksystems selbst wird hier nicht beschrieben. Dazu bitte nach den Installationsvorgaben des + Die Installation des Datenbanksystems selbst wird hier nicht beschrieben. Dazu bitte nach den Installationsvorgaben des verwendeten Datenbanksystems verfahren.

                                                - + Hinweis:
                                                Im Falle eines frisch installierten MySQL/MariaDB Systems bitte nicht vergessen die anonymen "Jeder"-Nutzer mit einem - Admin-Tool (z.B. phpMyAdmin) zu löschen falls sie existieren ! + Admin-Tool (z.B. phpMyAdmin) zu löschen falls sie existieren !

                                                - - Beispielcode bzw. Scripts zum Erstellen einer MySQL/PostgreSQL/SQLite Datenbank ist im + + Beispielcode bzw. Scripts zum Erstellen einer MySQL/PostgreSQL/SQLite Datenbank ist im SVN -> contrib/dblog/db_create_<DBType>.sql enthalten.
                                                - (Achtung: Die lokale FHEM-Installation enthält im Unterverzeichnis ./contrib/dblog nicht die aktuellsten + (Achtung: Die lokale FHEM-Installation enthält im Unterverzeichnis ./contrib/dblog nicht die aktuellsten Scripte !!)

                                                - + Die Datenbank beinhaltet 2 Tabellen: current und history.
                                                Die Tabelle current enthält den letzten Stand pro Device und Reading.
                                                In der Tabelle history sind alle Events historisch gespeichert.
                                                - Beachten sie bitte unbedingt das Attribut DbLogType um die Benutzung der Tabellen + Beachten sie bitte unbedingt das DbLogType um die Benutzung der Tabellen current und history festzulegen.

                                                - + Die Tabellenspalten haben folgende Bedeutung:

                                                - +
                                                  -
                                                DBI : sudo apt-get install libdbi-perl
                                                MySQL : sudo apt-get install [mysql-server] mysql-client libdbd-mysql libdbd-mysql-perl (mysql-server nur bei lokaler MySQL-Server-Installation)
                                                +
                                                @@ -8249,15 +8399,15 @@ attr SMA_Energymeter DbLogValueFn

                                                - + Index anlegen
                                                Für die Leseperformance, z.B. bei der Erstellung von SVG-PLots, ist es von besonderer Bedeutung dass der Index "Search_Idx" oder ein vergleichbarer Index (z.B. ein Primary Key) angelegt ist.

                                                - + Der Index "Search_Idx" kann mit diesen Statements, z.B. in der Datenbank 'fhem', angelegt werden (auch nachträglich):

                                                - +
                                                  -
                                                TIMESTAMP : Zeitpunkt des Events, z.B. 2007-12-30 21:45:22
                                                DEVICE : Name des Devices, z.B. Wetterstation
                                                +
                                                @@ -8265,131 +8415,131 @@ attr SMA_Energymeter DbLogValueFn
                                                MySQL : CREATE INDEX Search_Idx ON `fhem`.`history` (DEVICE, READING, TIMESTAMP);
                                                SQLite : CREATE INDEX Search_Idx ON `history` (DEVICE, READING, TIMESTAMP);

                                              - + Der Code zur Anlage ist ebenfalls in den Scripten - SVN -> contrib/dblog/db_create_<DBType>.sql + SVN -> contrib/dblog/db_create_<DBType>.sql enthalten.

                                              - - Für die Verbindung zur Datenbank wird eine Konfigurationsdatei verwendet. - Die Konfiguration ist in einer separaten Datei abgelegt um das Datenbankpasswort nicht in Klartext in der + + Für die Verbindung zur Datenbank wird eine Konfigurationsdatei verwendet. + Die Konfiguration ist in einer separaten Datei abgelegt um das Datenbankpasswort nicht in Klartext in der + FHEM-Haupt-Konfigurationsdatei speichern zu müssen. Ansonsten wäre es mittels des list Befehls einfach auslesbar.

                                              - - Die Konfigurationsdatei wird z.B. nach /opt/fhem kopiert und hat folgenden Aufbau, den man an seine Umgebung + + Die Konfigurationsdatei wird z.B. nach /opt/fhem kopiert und hat folgenden Aufbau, den man an seine Umgebung anpassen muß (entsprechende Zeilen entkommentieren und anpassen):

                                              - +
                                                   ####################################################################################
                                              -    # database configuration file     
                                              -    #   
                                              +    # database configuration file
                                              +    #
                                                   # NOTE:
                                                   # If you don't use a value for user / password please delete the leading hash mark
                                              -    # and write 'user => ""' respectively 'password => ""' instead !    
                                              +    # and write 'user => ""' respectively 'password => ""' instead !
                                                   #
                                                   #
                                              -    ## for MySQL                                                      
                                              +    ## for MySQL
                                                   ####################################################################################
                                              -    #%dbconfig= (                                                    
                                              -    #    connection => "mysql:database=fhem;host=<database host>;port=3306",    
                                              -    #    user => "fhemuser",                                          
                                              +    #%dbconfig= (
                                              +    #    connection => "mysql:database=fhem;host=<database host>;port=3306",
                                              +    #    user => "fhemuser",
                                                   #    password => "fhempassword",
                                              -    #    # optional enable(1) / disable(0) UTF-8 support 
                                              -    #    # (full UTF-8 support exists from DBD::mysql version 4.032, but installing 
                                              -    #    # 4.042 is highly suggested)    
                                              -    #    utf8 => 1   
                                              -    #);                                                              
                                              +    #    # optional enable(1) / disable(0) UTF-8 support
                                              +    #    # (full UTF-8 support exists from DBD::mysql version 4.032, but installing
                                              +    #    # 4.042 is highly suggested)
                                              +    #    utf8 => 1
                                              +    #);
                                                   ####################################################################################
                                              -    #                                                                
                                              -    ## for PostgreSQL                                                
                                              +    #
                                              +    ## for PostgreSQL
                                                   ####################################################################################
                                              -    #%dbconfig= (                                                   
                                              -    #    connection => "Pg:database=fhem;host=<database host>",        
                                              -    #    user => "fhemuser",                                     
                                              -    #    password => "fhempassword"                              
                                              -    #);                                                              
                                              +    #%dbconfig= (
                                              +    #    connection => "Pg:database=fhem;host=<database host>",
                                              +    #    user => "fhemuser",
                                              +    #    password => "fhempassword"
                                              +    #);
                                                   ####################################################################################
                                              -    #                                                                
                                              -    ## for SQLite (username and password stay empty for SQLite)      
                                              +    #
                                              +    ## for SQLite (username and password stay empty for SQLite)
                                                   ####################################################################################
                                              -    #%dbconfig= (                                                   
                                              -    #    connection => "SQLite:dbname=/opt/fhem/fhem.db",        
                                              -    #    user => "",                                             
                                              -    #    password => ""                                          
                                              -    #);                                                              
                                              +    #%dbconfig= (
                                              +    #    connection => "SQLite:dbname=/opt/fhem/fhem.db",
                                              +    #    user => "",
                                              +    #    password => ""
                                              +    #);
                                                   ####################################################################################
                                                   
                                              Wird configDB genutzt, ist das Konfigurationsfile in die configDB hochzuladen !

                                              - + Hinweis zu Sonderzeichen:
                                              - Werden Sonderzeichen, wie z.B. @, $ oder %, welche eine programmtechnische Bedeutung in Perl haben im Passwort verwendet, + Werden Sonderzeichen, wie z.B. @, $ oder %, welche eine programmtechnische Bedeutung in Perl haben im Passwort verwendet, sind diese Zeichen zu escapen. - Das heißt in diesem Beispiel wäre zu verwenden: \@,\$ bzw. \%. + Das heißt in diesem Beispiel wäre zu verwenden: \@,\$ bzw. \%.


                                              - + Define +

                                              +
                                                -
                                                - define <name> DbLog <configfilename> <regexp>

                                                <configfilename> ist die vorbereitete Konfigurationsdatei.
                                                <regexp> ist identisch FileLog der Filelog-Definition.

                                                - + Beispiel:
                                                  define myDbLog DbLog /etc/fhem/db.conf .*:.*
                                                  speichert alles in der Datenbank

                                                - + Nachdem das DbLog-Device definiert wurde, ist empfohlen einen Konfigurationscheck auszuführen:

                                                  set <name> configCheck

                                                - Dieser Check prüft einige wichtige Einstellungen des DbLog-Devices und gibt Empfehlungen für potentielle Verbesserungen. + Dieser Check prüft einige wichtige Einstellungen des DbLog-Devices und gibt Empfehlungen für potentielle Verbesserungen.


                                                - - DbLog unterscheidet den synchronen (Default) und asynchronen Logmodus. Der Logmodus ist über das - Attribut asyncMode einstellbar. Ab Version 2.13.5 unterstützt DbLog einen gesetzten + + DbLog unterscheidet den synchronen (Default) und asynchronen Logmodus. Der Logmodus ist über das + asyncMode einstellbar. Ab Version 2.13.5 unterstützt DbLog einen gesetzten Primary Key (PK) in den Tabellen Current und History. Soll PostgreSQL mit PK genutzt werden, muss PostgreSQL mindestens Version 9.5 sein.

                                                - - Der gespeicherte Wert des Readings wird optimiert für eine automatisierte Nachverarbeitung, z.B. yes wird transformiert + + Der gespeicherte Wert des Readings wird optimiert für eine automatisierte Nachverarbeitung, z.B. yes wird transformiert nach 1.

                                                - + Die gespeicherten Werte können mittels GET Funktion angezeigt werden:
                                                  get myDbLog - - 2012-11-10 2012-11-10 KS300:temperature

                                                - + FileLog-Dateien nach DbLog übertragen

                                                Zur Übertragung von vorhandenen Filelog-Daten in die DbLog-Datenbank steht das spezielle Modul 98_FileLogConvert.pm zur Verfügung.
                                                Dieses Modul kann hier bzw. aus dem Verzeichnis ./contrib geladen werden. - Weitere Informationen und Hilfestellung gibt es im entsprechenden + Weitere Informationen und Hilfestellung gibt es im entsprechenden Forumthread .


                                                - + Reporting und Management von DbLog-Datenbankinhalten

                                                Mit Hilfe SVG können Datenbankinhalte visualisiert werden.
                                                - Darüber hinaus kann das Modul DbRep genutzt werden um tabellarische - Datenbankauswertungen anzufertigen oder den Datenbankinhalt mit den zur Verfügung stehenden Funktionen zu verwalten. + Darüber hinaus kann das Modul DbRep genutzt werden um tabellarische + Datenbankauswertungen anzufertigen oder den Datenbankinhalt mit den zur Verfügung stehenden Funktionen zu verwalten.


                                                - + Troubleshooting

                                                - Wenn nach der erfolgreichen Definition das DbLog-Device nicht wie erwartet arbeitet, + Wenn nach der erfolgreichen Definition das DbLog-Device nicht wie erwartet arbeitet, können folgende Hinweise hilfreich sein:

                                                - +
                                                • Wurden die vorbereitenden Schritte gemacht, die in der commandref beschrieben sind ? (Softwarekomponenten installieren, Tabellen, Index anlegen)
                                                • Wurde ein "set <name> configCheck" nach dem Define durchgeführt und eventuelle Fehler beseitigt bzw. Empfehlungen umgesetzt ?
                                                • @@ -8397,233 +8547,305 @@ attr SMA_Energymeter DbLogValueFn
                                                • Beim Anlegen eines SVG-Plots erscheint keine Drop-Down Liste mit Vorschlagswerten -> Attribut "DbLogType" auf "Current/History" setzen.

                                                - - Sollten diese Hinweise nicht zum Erfolg führen, bitte den verbose-Level im DbLog Device auf 4 oder 5 hochsetzen und + + Sollten diese Hinweise nicht zum Erfolg führen, bitte den verbose-Level im DbLog Device auf 4 oder 5 hochsetzen und die Einträge bezüglich des DbLog-Device im Logfile beachten. - - Zur Problemanalyse bitte die Ausgabe von "list <name>", das Ergebnis von "set <name> configCheck" und die + + Zur Problemanalyse bitte die Ausgabe von "list <name>", das Ergebnis von "set <name> configCheck" und die Ausgaben des DbLog-Device im Logfile im Forumthread posten.

                                                - +


                                              + + Set +
                                              +
                                              - - Set
                                                - set <name> addCacheLine YYYY-MM-DD HH:MM:SS|<device>|<type>|<event>|<reading>|<value>|[<unit>]

                                                -
                                                  Im asynchronen Modus wird ein neuer Datensatz in den Cache eingefügt und beim nächsten Synclauf mit abgearbeitet. +
                                                • set <name> addCacheLine YYYY-MM-DD HH:MM:SS|<device>|<type>|<event>|<reading>|<value>|[<unit>]

                                                  + +
                                                    + Im asynchronen Modus wird ein neuer Datensatz in den Cache eingefügt und beim nächsten Synclauf mit abgearbeitet.

                                                    - + Beispiel:
                                                    set <name> addCacheLine 2017-12-05 17:03:59|MaxBathRoom|MAX|valveposition: 95|valveposition|95|%
                                                    -

                                                  - - set <name> addLog <devspec>:<Reading> [Value] [CN=<caller name>] [!useExcludes]

                                                  -
                                                    Fügt einen zusätzlichen Logeintrag einer Device/Reading-Kombination in die Datenbank ein. Die eventuell im Attribut - "DbLogExclude" spezifizierten Readings (im Quelldevice) werden nicht geloggt, es sei denn sie sind im Attribut + +
                                                  +
                                                  + +
                                                • set <name> addLog <devspec>:<Reading> [Value] [CN=<caller name>] [!useExcludes]

                                                  + +
                                                    + Fügt einen zusätzlichen Logeintrag einer Device/Reading-Kombination in die Datenbank ein. Die eventuell im Attribut + "DbLogExclude" spezifizierten Readings (im Quelldevice) werden nicht geloggt, es sei denn sie sind im Attribut "DbLogInclude" enthalten bzw. der addLog-Aufruf erfolgte mit der Option "!useExcludes".

                                                    - +
                                                    • <devspec>:<Reading> - Das Device kann als Geräte-Spezifikation angegeben werden.
                                                      Die Angabe von "Reading" wird als regulärer Ausdruck ausgewertet. Ist das Reading nicht vorhanden und der Wert "Value" angegeben, wird das Reading - in die DB eingefügt wenn es kein regulärer Ausdruck und ein valider + in die DB eingefügt wenn es kein regulärer Ausdruck und ein valider Readingname ist.
                                                    • Value - Optional kann "Value" für den Readingwert angegeben werden. Ist Value nicht angegeben, wird der aktuelle - Wert des Readings in die DB eingefügt.
                                                    • -
                                                    • CN=<caller name> - Mit dem Schlüssel "CN=" (Caller Name) kann dem addLog-Aufruf ein String, - z.B. der Name des aufrufenden Devices (z.B. eines at- oder notify-Devices), mitgegeben - werden. Mit Hilfe der im Attribut "valueFn" hinterlegten - Funktion kann dieser Schlüssel über die Variable $CN ausgewertet werden. Dadurch ist es - möglich, das Verhalten des addLogs abhängig von der aufrufenden Quelle zu beeinflussen. -
                                                    • -
                                                    • !useExcludes - Ein eventuell im Quell-Device gesetztes Attribut "DbLogExclude" wird von der Funktion berücksichtigt. Soll dieses + Wert des Readings in die DB eingefügt.
                                                    • +
                                                    • CN=<caller name> - Mit dem Schlüssel "CN=" (Caller Name) kann dem addLog-Aufruf ein String, + z.B. der Name des aufrufenden Devices (z.B. eines at- oder notify-Devices), mitgegeben + werden. Mit Hilfe der im valueFn hinterlegten + Funktion kann dieser Schlüssel über die Variable $CN ausgewertet werden. Dadurch ist es + möglich, das Verhalten des addLogs abhängig von der aufrufenden Quelle zu beeinflussen. +
                                                    • +
                                                    • !useExcludes - Ein eventuell im Quell-Device gesetztes Attribut "DbLogExclude" wird von der Funktion berücksichtigt. Soll dieses Attribut nicht berücksichtigt werden, kann das Schüsselwort "!useExcludes" verwendet werden.

                                                    - + Das Datenbankfeld "EVENT" wird automatisch mit "addLog" belegt.
                                                    Es wird KEIN zusätzlicher Event im System erzeugt !

                                                    - + Beispiele:
                                                    set <name> addLog SMA_Energymeter:Bezug_Wirkleistung
                                                    set <name> addLog TYPE=SSCam:state
                                                    set <name> addLog MyWetter:(fc10.*|fc8.*)
                                                    set <name> addLog MyWetter:(wind|wind_ch.*) 20 !useExcludes
                                                    set <name> addLog TYPE=CUL_HM:FILTER=model=HM-CC-RT-DN:FILTER=subType!=(virtual|):(measured-temp|desired-temp|actuator)

                                                    - + set <name> addLog USV:state CN=di.cronjob
                                                    - In der valueFn-Funktion wird der Aufrufer "di.cronjob" über die Variable $CN ausgewertet und davon abhängig der + In der valueFn-Funktion wird der Aufrufer "di.cronjob" über die Variable $CN ausgewertet und davon abhängig der Timestamp dieses addLog korrigiert:

                                                    - valueFn = if($CN eq "di.cronjob" and $TIMESTAMP =~ m/\s00:00:[\d:]+/) { $TIMESTAMP =~ s/\s([^\s]+)/ 23:59:59/ } - -

                                                  - - set <name> clearReadings

                                                  -
                                                    Leert Readings die von verschiedenen DbLog-Funktionen angelegt wurden.

                                                  - - set <name> eraseReadings

                                                  -
                                                    Löscht alle Readings außer dem Reading "state".

                                                  - - set <name> commitCache

                                                  -
                                                    Im asynchronen Modus (Attribut asyncMode=1), werden die im Speicher gecachten Daten in die Datenbank geschrieben + valueFn = if($CN eq "di.cronjob" and $TIMESTAMP =~ m/\s00:00:[\d:]+/) { $TIMESTAMP =~ s/\s([^\s]+)/ 23:59:59/ } + + +
                                                  +
                                                  + +
                                                • set <name> clearReadings

                                                  +
                                                    + Leert Readings die von verschiedenen DbLog-Funktionen angelegt wurden. + + +
                                                  +
                                                  + +
                                                • set <name> eraseReadings

                                                  +
                                                    + Löscht alle Readings außer dem Reading "state". + +
                                                  +
                                                  + +
                                                • set <name> commitCache

                                                  +
                                                    + Im asynchronen Modus (asyncMode=1), werden die im Speicher gecachten Daten in die Datenbank geschrieben und danach der Cache geleert. Der interne Timer des asynchronen Modus wird dabei neu gesetzt. - Der Befehl kann nützlich sein um manuell oder z.B. über ein AT den Cacheinhalt zu einem definierten Zeitpunkt in die - Datenbank zu schreiben.

                                                  + Der Befehl kann nützlich sein um manuell oder z.B. über ein AT den Cacheinhalt zu einem definierten Zeitpunkt in die + Datenbank zu schreiben. +
                                                • +
                                                +
                                                - set <name> configCheck

                                                +
                                              • set <name> configCheck

                                                  Es werden einige wichtige Einstellungen geprüft und Empfehlungen gegeben falls potentielle Verbesserungen - identifiziert wurden. -

                                                + identifiziert wurden. +
                                              • +
                                              +
                                              - set <name> count

                                              -
                                                Zählt die Datensätze in den Tabellen current und history und schreibt die Ergebnisse in die Readings - countCurrent und countHistory.

                                              - - set <name> countNbl

                                              +
                                            • set <name> count

                                              +
                                                Zählt die Datensätze in den Tabellen current und history und schreibt die Ergebnisse in die Readings + countCurrent und countHistory. + +
                                              +
                                              + +
                                            • set <name> countNbl

                                                Die non-blocking Ausführung von "set <name> count".

                                                Hinweis:
                                                Obwohl die Funktion selbst non-blocking ist, muß das DbLog-Device im asynchronen Modus betrieben werden (asyncMode = 1) - um FHEM nicht zu blockieren ! -

                                              + um FHEM nicht zu blockieren ! +
                                            • +
                                            +
                                            - set <name> deleteOldDays <n>

                                            -
                                              Löscht Datensätze in Tabelle history, die älter als <n> Tage sind. - Die Anzahl der gelöschten Datensätze wird in das Reading lastRowsDeleted geschrieben.

                                            +
                                          • set <name> deleteOldDays <n>

                                            +
                                              Löscht Datensätze in Tabelle history, die älter als <n> Tage sind. + Die Anzahl der gelöschten Datensätze wird in das Reading lastRowsDeleted geschrieben. + +
                                            +
                                            - set <name> deleteOldDaysNbl <n>

                                            +
                                          • set <name> deleteOldDaysNbl <n>

                                              Identisch zu Funktion "deleteOldDays" wobei deleteOldDaysNbl nicht blockierend ausgeführt wird.

                                              Hinweis:
                                              Obwohl die Funktion selbst non-blocking ist, muß das DbLog-Device im asynchronen Modus betrieben werden (asyncMode = 1) - um FHEM nicht zu blockieren ! -

                                            + um FHEM nicht zu blockieren ! +
                                          • +
                                          +
                                          - - set <name> exportCache [nopurge | purgecache]

                                          -
                                            Wenn DbLog im asynchronen Modus betrieben wird, kann der Cache mit diesem Befehl in ein Textfile geschrieben + +
                                          • set <name> exportCache [nopurge | purgecache]

                                            +
                                              + Wenn DbLog im asynchronen Modus betrieben wird, kann der Cache mit diesem Befehl in ein Textfile geschrieben werden. Das File wird per Default in dem Verzeichnis (global->modpath)/log/ erstellt. Das Zielverzeichnis kann mit - dem Attribut "expimpdir" geändert werden.
                                              + dem expimpdir geändert werden.
                                              + Der Name des Files wird automatisch generiert und enthält den Präfix "cache_", gefolgt von dem DbLog-Devicenamen und dem aktuellen Zeitstempel, z.B. "cache_LogDB_2017-03-23_22-13-55".
                                              Mit den Optionen "nopurge" bzw. "purgecache" wird festgelegt, ob der Cacheinhalt nach dem Export gelöscht werden soll oder nicht. Mit "nopurge" (default) bleibt der Cacheinhalt erhalten.
                                              - Das Attribut "exportCacheAppend" bestimmt dabei, ob mit jedem Exportvorgang ein neues Exportfile - angelegt wird (default) oder der Cacheinhalt an das bestehende (neueste) Exportfile angehängt wird. -

                                            - - set <name> importCachefile <file>

                                            -
                                              Importiert ein mit "exportCache" geschriebenes File in die Datenbank. + Das exportCacheAppend bestimmt dabei, ob mit jedem Exportvorgang ein neues Exportfile + angelegt wird (default) oder der Cacheinhalt an das bestehende (neueste) Exportfile angehängt wird. + +
                                            +
                                            + +
                                          • set <name> importCachefile <file>

                                            +
                                              + Importiert ein mit "exportCache" geschriebenes File in die Datenbank. Die verfügbaren Dateien werden per Default im Verzeichnis (global->modpath)/log/ gesucht und eine Drop-Down Liste - erzeugt sofern Dateien gefunden werden. Das Quellverzeichnis kann mit dem Attribut expimpdir geändert werden.
                                              - Es werden nur die Dateien angezeigt, die dem Muster "cache_", gefolgt von dem DbLog-Devicenamen entsprechen.
                                              + erzeugt sofern Dateien gefunden werden. Das Quellverzeichnis kann mit dem expimpdir + geändert werden.
                                              + Es werden nur die Dateien angezeigt, die dem Muster "cache_", gefolgt von dem DbLog-Devicenamen entsprechen.
                                              Zum Beispiel "cache_LogDB_2017-03-23_22-13-55", falls das Log-Device "LogDB" heißt.
                                              + Nach einem erfolgreichen Import wird das File mit dem Präfix "impdone_" versehen und erscheint dann nicht mehr - in der Drop-Down Liste. Soll ein Cachefile in eine andere als der Quelldatenbank importiert werden, kann das + DbLog-Device im Filenamen angepasst werden damit dieses File den Suchkriterien entspricht und in der Drop-Down Liste - erscheint.

                                            - - set <name> listCache

                                            -
                                              Wenn DbLog im asynchronen Modus betrieben wird (Attribut asyncMode=1), können mit diesem Befehl die im Speicher gecachten Events - angezeigt werden.

                                            + erscheint. +
                                          • +
                                          +
                                          - set <name> purgeCache

                                          -
                                            Im asynchronen Modus (Attribut asyncMode=1), werden die im Speicher gecachten Daten gelöscht. - Es werden keine Daten aus dem Cache in die Datenbank geschrieben.

                                          - - set <name> reduceLog <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

                                          -
                                            Reduziert historische Datensätze, die älter sind als <no> Tage und (optional) neuer sind als <nn> Tage - auf einen Eintrag (den ersten) pro Stunde je Device & Reading.
                                            - Innerhalb von device/reading können SQL-Wildcards "%" und "_" verwendet werden.

                                            - - Das Reading "reduceLogState" zeigt den Ausführungsstatus des letzten reduceLog-Befehls.

                                            - Durch die optionale Angabe von 'average' wird nicht nur die Datenbank bereinigt, sondern alle numerischen Werte - einer Stunde werden auf einen einzigen Mittelwert reduziert.
                                            - Durch die optionale Angabe von 'average=day' wird nicht nur die Datenbank bereinigt, sondern alle numerischen - Werte eines Tages auf einen einzigen Mittelwert reduziert. (impliziert 'average')

                                             - - Optional kann als letzter Parameter "exclude=device1:reading1,device2:reading2,...." - angegeben werden um device/reading Kombinationen von reduceLog auszuschließen.

                                             - - Optional kann als letzter Parameter "include=device:reading" angegeben werden um - die auf die Datenbank ausgeführte SELECT-Abfrage einzugrenzen, was die RAM-Belastung verringert und die - Performance erhöht.

                                            - -
                                              - Beispiel:
                                              - set <name> reduceLog 270 average include=Luftdaten_remote:%
                                              - -
                                            -
                                            - - ACHTUNG: Es wird dringend empfohlen zu überprüfen ob der standard INDEX 'Search_Idx' in der Tabelle 'history' existiert!
                                            - Die Abarbeitung dieses Befehls dauert unter Umständen (ohne INDEX) extrem lange. FHEM wird durch den Befehl bis - zur Fertigstellung komplett blockiert !

                                            - -

                                          - - set <name> reduceLogNbl <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

                                          -
                                            - Führt die gleiche Funktion wie "set <name> reduceLog" aus. Im Gegensatz zu reduceLog wird mit FHEM wird durch den Befehl reduceLogNbl nicht - mehr blockiert da diese Funktion non-blocking implementiert ist ! -

                                            +
                                          • set <name> listCache

                                            +
                                              Wenn DbLog im asynchronen Modus betrieben wird (Attribut asyncMode=1), können mit diesem Befehl die im Speicher gecachten Events + angezeigt werden. + +
                                            +
                                            - Hinweis:
                                            - Obwohl die Funktion selbst non-blocking ist, muß das DbLog-Device im asynchronen Modus betrieben werden (asyncMode = 1) - um FHEM nicht zu blockieren ! -

                                          - - set <name> reopen [n]

                                          -
                                            Schließt die Datenbank und öffnet sie danach sofort wieder wenn keine Zeit [n] in Sekunden angegeben wurde. +
                                          • set <name> purgeCache

                                            +
                                              + Im asynchronen Modus (asyncMode=1), werden die im Speicher gecachten Daten gelöscht. + Es werden keine Daten aus dem Cache in die Datenbank geschrieben. + +
                                            +
                                            + +
                                          • set <name> reduceLog <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

                                            +
                                              + Reduziert historische Datensätze, die älter sind als <no> Tage und (optional) neuer sind als <nn> Tage + auf einen Eintrag (den ersten) pro Stunde je Device & Reading.
                                              + Innerhalb von device/reading können SQL-Wildcards "%" und "_" verwendet werden.

                                              + + Das Reading "reduceLogState" zeigt den Ausführungsstatus des letzten reduceLog-Befehls.

                                              + Durch die optionale Angabe von 'average' wird nicht nur die Datenbank bereinigt, sondern alle numerischen Werte + einer Stunde werden auf einen einzigen Mittelwert reduziert.
                                              + Durch die optionale Angabe von 'average=day' wird nicht nur die Datenbank bereinigt, sondern alle numerischen + Werte eines Tages auf einen einzigen Mittelwert reduziert. (impliziert 'average')

                                              + + Optional kann als letzer Parameter "exclude=device1:reading1,device2:reading2,...." + angegeben werden um device/reading Kombinationen von reduceLog auszuschließen.

                                              + + Optional kann als letzer Parameter "include=device:reading" angegeben werden um + die auf die Datenbank ausgeführte SELECT-Abfrage einzugrenzen, was die RAM-Belastung verringert und die + Performance erhöht.

                                              + +
                                                + Beispiel:
                                                + set <name> reduceLog 270 average include=Luftdaten_remote:%
                                                + +
                                              +
                                              + + ACHTUNG: Es wird dringend empfohlen zu überprüfen ob der standard INDEX 'Search_Idx' in der Tabelle 'history' existiert!
                                              + Die Abarbeitung dieses Befehls dauert unter Umständen (ohne INDEX) extrem lange. FHEM wird durch den Befehl bis + zur Fertigstellung komplett blockiert ! + +
                                            +
                                            + +
                                          • set <name> reduceLogNbl <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

                                            +
                                              + Führt die gleiche Funktion wie "set <name> reduceLog" aus. Im Gegensatz zu reduceLog wird mit FHEM wird durch den Befehl reduceLogNbl nicht + mehr blockiert da diese Funktion non-blocking implementiert ist ! +

                                              + + Hinweis:
                                              + Obwohl die Funktion selbst non-blocking ist, muß das DbLog-Device im asynchronen Modus betrieben werden (asyncMode = 1) + um FHEM nicht zu blockieren ! + +
                                            +
                                            + +
                                          • set <name> reopen [n]

                                            +
                                              + Schließt die Datenbank und öffnet sie danach sofort wieder wenn keine Zeit [n] in Sekunden angegeben wurde. Dabei wird die Journaldatei geleert und neu angelegt.
                                              Verbessert den Datendurchsatz und vermeidet Speicherplatzprobleme.
                                              - Wurde eine optionale Verzögerungszeit [n] in Sekunden angegeben, wird die Verbindung zur Datenbank geschlossen und erst - nach Ablauf von [n] Sekunden wieder neu verbunden. - Im synchronen Modus werden die Events in dieser Zeit nicht gespeichert. - Im asynchronen Modus werden die Events im Cache gespeichert und nach dem Reconnect in die Datenbank geschrieben.

                                            + Wurde eine optionale Verzögerungszeit [n] in Sekunden angegeben, wird die Verbindung zur Datenbank geschlossen und erst + nach Ablauf von [n] Sekunden wieder neu verbunden. + Im synchronen Modus werden die Events in dieser Zeit nicht gespeichert. + Im asynchronen Modus werden die Events im Cache gespeichert und nach dem Reconnect in die Datenbank geschrieben. +
                                          • +
                                          +
                                          - set <name> rereadcfg

                                          +
                                        • set <name> rereadcfg

                                            Schließt die Datenbank und öffnet sie danach sofort wieder. Dabei wird die Journaldatei geleert und neu angelegt.
                                            Verbessert den Datendurchsatz und vermeidet Speicherplatzprobleme.
                                            - Zwischen dem Schließen der Verbindung und dem Neuverbinden werden die Konfigurationsdaten neu gelesen

                                          + Zwischen dem Schließen der Verbindung und dem Neuverbinden werden die Konfigurationsdaten neu gelesen +
                                        • +
                                        +
                                        - set <name> userCommand <validSqlStatement>

                                        +
                                      • set <name> userCommand <validSqlStatement>

                                          - Führt einfache sql select Befehle auf der Datenbank aus. Der Befehl und ein zurückgeliefertes - Ergebnis wird in das Reading "userCommand" bzw. "userCommandResult" geschrieben. Das Ergebnis kann nur - einzeilig sein. - Die Ausführung von SQL-Befehlen in DbLog ist veraltet. Dafür sollte das Auswertungsmodul + Führt einfache sql select Befehle auf der Datenbank aus. Der Befehl und ein zurückgeliefertes + Ergebnis wird in das Reading "userCommand" bzw. "userCommandResult" geschrieben. Das Ergebnis kann nur + einzeilig sein. + Die Ausführung von SQL-Befehlen in DbLog ist veraltet. Dafür sollte das Auswertungsmodul DbRep genutzt werden.
                                          -

                                        +
                                      • +
                                      +
                                      -

                                    - - - - Get -
                                      - get <name> ReadingsVal       <device> <reading> <default>
                                      - get <name> ReadingsTimestamp <device> <reading> <default>
                                      -
                                      - Liest einen einzelnen Wert aus der Datenbank, Benutzung und Syntax sind weitgehend identisch zu ReadingsVal() und ReadingsTimestamp().
                                    -
                                    -
                                    +
                                    + + + Get +
                                    +
                                    +
                                      - get <name> <infile> <outfile> <from> - <to> <column_spec> -

                                      +
                                    • get <name> ReadingsVal <device> <reading> <default>
                                      +
                                    • +
                                    • get <name> ReadingsTimestamp <device> <reading> <default>

                                      + + Liest einen einzelnen Wert aus der Datenbank. Die Syntax ist weitgehend identisch zu ReadingsVal() und ReadingsTimestamp(). +
                                      +
                                    • +
                                    +
                                    + +
                                      +
                                    • get <name> <infile> <outfile> <from> + <to> <column_spec>

                                      + Liesst Daten aus der Datenbank. Wird durch die Frontends benutzt um Plots zu generieren ohne selbst auf die Datenank zugreifen zu müssen.
                                      +
                                      • <in>
                                        Ein Parameter um eine Kompatibilität zum Filelog herzustellen. @@ -8633,9 +8855,9 @@ attr SMA_Energymeter DbLogValueFn
                                      • current: die aktuellen Werte aus der Tabelle "current" werden gelesen.
                                      • history: die historischen Werte aus der Tabelle "history" werden gelesen.
                                      • -: identisch wie "history"
                                      • -
                                      +
                                    - +
                                  • <out>
                                    Ein Parameter um eine Kompatibilität zum Filelog herzustellen. Dieser Parameter ist per default immer auf - zu setzen um die @@ -8648,12 +8870,12 @@ attr SMA_Energymeter DbLogValueFn
                                  • -: default
                                • - +
                                • <from> / <to>
                                  Wird benutzt um den Zeitraum der Daten einzugrenzen. Es ist das folgende Zeitformat oder ein Teilstring davon zu benutzen:
                                    YYYY-MM-DD_HH24:MI:SS
                                • - +
                                • <column_spec>
                                  Für jede column_spec Gruppe wird ein Datenset zurückgegeben welches durch einen Kommentar getrennt wird. Dieser Kommentar repräsentiert @@ -8695,14 +8917,14 @@ attr SMA_Energymeter DbLogValueFn
                              • <regexp>
                                - Diese Zeichenkette wird als Perl Befehl ausgewertet. + Diese Zeichenkette wird als Perl Befehl ausgewertet. Die regexp wird vor dem angegebenen <fn> Parameter ausgeführt.
                                Bitte zur Beachtung: Diese Zeichenkette darf keine Leerzeichen enthalten da diese sonst als <column_spec> Trennung interpretiert werden und alles nach dem Leerzeichen als neue <column_spec> gesehen wird.
                                - + Schlüsselwörter
                              • $val ist der aktuelle Wert die die Datenbank für ein Device/Reading ausgibt.
                              • $ts ist der aktuelle Timestamp des Logeintrages.
                              • @@ -8712,68 +8934,70 @@ attr SMA_Energymeter DbLogValueFn nicht für eine Folgeberechnung verwendet.
                            • - +


                            Beispiele:
                            • get myDbLog - - 2012-11-10 2012-11-20 KS300:temperature
                            • - +
                            • get myDbLog current ALL - - %:temperature

                            • Damit erhält man alle aktuellen Readings "temperature" von allen in der DB geloggten Devices. Achtung: bei Nutzung von Jokerzeichen auf die history-Tabelle kann man sein FHEM aufgrund langer Laufzeit lahmlegen! - +
                            • get myDbLog - - 2012-11-10_10 2012-11-10_20 KS300:temperature::int1
                              gibt Daten aus von 10Uhr bis 20Uhr am 10.11.2012
                            • - +
                            • get myDbLog - all 2012-11-10 2012-11-20 KS300:temperature
                            • - +
                            • get myDbLog - - 2012-11-10 2012-11-20 KS300:temperature KS300:rain::delta-h KS300:rain::delta-d
                            • - +
                            • get myDbLog - - 2012-11-10 2012-11-20 MyFS20:data:::$val=~s/(on|off).*/$1eq"on"?1:0/eg
                              gibt 1 zurück für alle Ausprägungen von on* (on|on-for-timer etc) und 0 für alle off*
                            • - +
                            • get myDbLog - - 2012-11-10 2012-11-20 Bodenfeuchte:data:::$val=~s/.*B:\s([-\.\d]+).*/$1/eg
                              Beispiel von OWAD: Ein Wert wie z.B.: "A: 49.527 % B: 66.647 % C: 9.797 % D: 0.097 V"
                              und die Ausgabe ist für das Reading B folgende: 2012-11-20_10:23:54 66.647
                            • - +
                            • get DbLog - - 2013-05-26 2013-05-28 Pumpe:data::delta-ts:$val=~s/on/hide/
                              Realisierung eines Betriebsstundenzählers. Durch delta-ts wird die Zeit in Sek zwischen den Log- - Einträgen ermittelt. Die Zeiten werden bei den on-Meldungen nicht ausgegeben welche einer Abschaltzeit + Einträgen ermittelt. Die Zeiten werden bei den on-Meldungen nicht ausgegeben welche einer Abschaltzeit entsprechen würden.
                            -

                            +
                          +
                          Get für die Nutzung von webcharts
                            - get <name> <infile> <outfile> <from> - <to> <device> <querytype> <xaxis> <yaxis> <savename> -

                            +
                          • get <name> <infile> <outfile> <from> + <to> <device> <querytype> <xaxis> <yaxis> <savename>
                          • +
                            + Liest Daten aus der Datenbank aus und gibt diese in JSON formatiert aus. Wird für das Charting Frontend genutzt
                            • <name>
                              Der Name des definierten DbLogs, so wie er in der fhem.cfg angegeben wurde.
                            • - +
                            • <in>
                              Ein Dummy Parameter um eine Kompatibilität zum Filelog herzustellen. Dieser Parameter ist immer auf - zu setzen.
                            • - +
                            • <out>
                              - Ein Dummy Parameter um eine Kompatibilität zum Filelog herzustellen. + Ein Dummy Parameter um eine Kompatibilität zum Filelog herzustellen. Dieser Parameter ist auf webchart zu setzen um die Charting Get Funktion zu nutzen.
                            • - +
                            • <from> / <to>
                              Wird benutzt um den Zeitraum der Daten einzugrenzen. Es ist das folgende Zeitformat zu benutzen:
                                YYYY-MM-DD_HH24:MI:SS
                            • - +
                            • <device>
                              Ein String, der das abzufragende Device darstellt.
                            • - +
                            • <querytype>
                              Ein String, der die zu verwendende Abfragemethode darstellt. Zur Zeit unterstützte Werte sind:
                              getreadings um für ein bestimmtes device alle Readings zu erhalten
                              @@ -8789,81 +9013,84 @@ attr SMA_Energymeter DbLogValueFn monthstats um Statistiken für einen Wert (yaxis) für einen Monat abzufragen.
                              yearstats um Statistiken für einen Wert (yaxis) für ein Jahr abzufragen.
                            • - +
                            • <xaxis>
                              Ein String, der die X-Achse repräsentiert
                            • - +
                            • <yaxis>
                              Ein String, der die Y-Achse repräsentiert
                            • - +
                            • <savename>
                              Ein String, unter dem ein Chart in der Datenbank gespeichert werden soll
                            • - +
                            • <chartconfig>
                              Ein jsonstring der den zu speichernden Chart repräsentiert
                            • - +
                            • <pagingstart>
                              Ein Integer um den Startwert für die Abfrage 'getTableData' festzulegen
                            • - +
                            • <paginglimit>
                              Ein Integer um den Limitwert für die Abfrage 'getTableData' festzulegen


                            - + Beispiele:
                            • get logdb - webchart "" "" "" getcharts
                              Liefert alle gespeicherten Charts aus der Datenbank
                            • - +
                            • get logdb - webchart "" "" "" getdevices
                              Liefert alle verfügbaren Devices aus der Datenbank
                            • - +
                            • get logdb - webchart "" "" ESA2000_LED_011e getreadings
                              Liefert alle verfügbaren Readings aus der Datenbank unter Angabe eines Gerätes
                            • - +
                            • get logdb - webchart 2013-02-11_00:00:00 2013-02-12_00:00:00 ESA2000_LED_011e timerange TIMESTAMP day_kwh
                              Liefert Chart-Daten, die auf folgenden Parametern basieren: 'xaxis', 'yaxis', 'device', 'to' und 'from'
                              Die Ausgabe erfolgt als JSON, z.B.: [{'TIMESTAMP':'2013-02-11 00:10:10','VALUE':'0.22431388090756'},{'TIMESTAMP'.....}]
                            • - +
                            • get logdb - webchart 2013-02-11_00:00:00 2013-02-12_00:00:00 ESA2000_LED_011e savechart TIMESTAMP day_kwh tageskwh
                              Speichert einen Chart unter Angabe eines 'savename' und seiner zugehörigen Konfiguration
                            • - +
                            • get logdb - webchart "" "" "" deletechart "" "" 7
                              Löscht einen zuvor gespeicherten Chart unter Angabe einer id


                          - + + Attribute -

                          - +
                          +
                          +
                            - +
                          • addStateEvent
                              attr <device> addStateEvent [0|1] -
                              +


                              + Bekanntlich wird normalerweise bei einem Event mit dem Reading "state" der state-String entfernt, d.h. der Event ist nicht zum Beispiel "state: on" sondern nur "on".
                              Meistens ist es aber hilfreich in DbLog den kompletten Event verarbeiten zu können. Deswegen übernimmt DbLog per Default den Event inklusive dem Reading-String "state".
                              In einigen Fällen, z.B. alten oder speziellen Modulen, ist es allerdings wünschenswert den state-String wie gewöhnlich zu entfernen. In diesen Fällen bitte addStateEvent = "0" setzen. - Versuchen sie bitte diese Einstellung, falls es mit dem Standard Probleme geben sollte. + Versuchen sie bitte diese Einstellung, falls es mit dem Standard Probleme geben sollte.

                          - +
                            - +
                          • asyncMode
                              attr <device> asyncMode [1|0] -
                              - +

                              + Dieses Attribut stellt den Arbeitsmodus von DbLog ein. Im asynchronen Modus (asyncMode=1), werden die zu speichernden Events zunächst in Speicher gecacht. Nach Ablauf der Synchronisationszeit (Attribut syncInterval) oder bei Erreichen der maximalen Anzahl der Datensätze im Cache (Attribut cacheLimit) werden die gecachten Events im Block in die Datenbank geschrieben. @@ -8871,38 +9098,39 @@ attr SMA_Energymeter DbLogValueFn geschrieben falls sie dann verfügbar ist.
                              Im asynchronen Mode werden die Daten nicht blockierend mit einem separaten Hintergrundprozess in die Datenbank geschrieben. Det Timeout-Wert für diesen Hintergrundprozess kann mit dem Attribut "timeout" (Default 86400s) eingestellt werden. - Im synchronen Modus (Normalmodus) werden die Events nicht gecacht und sofort in die Datenbank geschrieben. Ist die Datenbank nicht + Im synchronen Modus (Normalmodus) werden die Events nicht gecacht und sofort in die Datenbank geschrieben. Ist die Datenbank nicht verfügbar, gehen sie verloren.

                          - +
                            - +
                          • bulkInsert
                              attr <device> bulkInsert [1|0] -
                              - - Schaltet den Insert-Modus zwischen "Array" (default) und "Bulk" um. Der Bulk Modus führt beim Insert von sehr - vielen Datensätzen in die history-Tabelle zu einer erheblichen Performancesteigerung vor allem im asynchronen - Mode. Um die volle Performancesteigerung zu erhalten, sollte in diesem Fall das Attribut "DbLogType" +

                              + + Schaltet den Insert-Modus zwischen "Array" (default) und "Bulk" um. Der Bulk Modus führt beim Insert von sehr + vielen Datensätzen in die history-Tabelle zu einer erheblichen Performancesteigerung vor allem im asynchronen + Mode. Um die volle Performancesteigerung zu erhalten, sollte in diesem Fall das Attribut "DbLogType" nicht die current-Tabelle enthalten.

                          - +
                            - +
                          • cacheEvents
                              attr <device> cacheEvents [2|1|0] -
                              +

                              +
                              • cacheEvents=1: es werden Events für das Reading CacheUsage erzeugt wenn ein Event zum Cache hinzugefügt wurde.
                              • -
                              • cacheEvents=2: es werden Events für das Reading CacheUsage erzeugt wenn im asynchronen Mode der Schreibzyklus in die +
                              • cacheEvents=2: es werden Events für das Reading CacheUsage erzeugt wenn im asynchronen Mode der Schreibzyklus in die Datenbank beginnt. CacheUsage enthält zu diesem Zeitpunkt die Anzahl der in die Datenbank zu schreibenden Datensätze.

                              @@ -8910,75 +9138,75 @@ attr SMA_Energymeter DbLogValueFn

                            - +
                              - +
                            • cacheLimit
                                - attr <device> cacheLimit <n> -
                                - + attr <device> cacheLimit <n> +

                                + Im asynchronen Logmodus wird der Cache in die Datenbank weggeschrieben und geleert wenn die Anzahl <n> Datensätze im Cache erreicht ist (default: 500).
                                - Der Timer des asynchronen Logmodus wird dabei neu auf den Wert des Attributs "syncInterval" + Der Timer des asynchronen Logmodus wird dabei neu auf den Wert des Attributs "syncInterval" gesetzt. Im Fehlerfall wird ein erneuter Schreibversuch frühestens nach syncInterval/2 gestartet.

                            - +
                              - +
                            • cacheOverflowThreshold
                                - attr <device> cacheOverflowThreshold <n> -
                                - - Legt im asynchronen Logmodus den Schwellenwert von <n> Datensätzen fest, ab dem der Cacheinhalt in ein File + attr <device> cacheOverflowThreshold <n> +

                                + + Legt im asynchronen Logmodus den Schwellenwert von <n> Datensätzen fest, ab dem der Cacheinhalt in ein File exportiert wird anstatt die Daten in die Datenbank zu schreiben.
                                - Die Funktion entspricht dem Set-Kommando "exportCache purgecache" und verwendet dessen Einstellungen.
                                - Mit diesem Attribut kann eine Überlastung des Serverspeichers verhindert werden falls die Datenbank für eine längere - Zeit nicht verfügbar ist (z.B. im Fehler- oder Wartungsfall). Ist der Attributwert kleiner oder gleich dem Wert des + Die Funktion entspricht dem Set-Kommando "exportCache purgecache" und verwendet dessen Einstellungen.
                                + Mit diesem Attribut kann eine Überlastung des Serverspeichers verhindert werden falls die Datenbank für eine längere + Zeit nicht verfügbar ist (z.B. im Fehler- oder Wartungsfall). Ist der Attributwert kleiner oder gleich dem Wert des Attributs "cacheLimit", wird der Wert von "cacheLimit" für "cacheOverflowThreshold" verwendet.
                                In diesem Fall wird der Cache immer in ein File geschrieben anstatt in die Datenbank sofern der Schwellenwert erreicht wurde.
                                - So können die Daten mit dieser Einstellung gezielt in ein oder mehrere Dateien geschreiben werden, um sie zu einem + So können die Daten mit dieser Einstellung gezielt in ein oder mehrere Dateien geschreiben werden, um sie zu einem späteren Zeitpunkt mit dem Set-Befehl "importCachefile" in die Datenbank zu importieren.

                            - +
                              - +
                            • colEvent
                                - attr <device> colEvent <n> -
                                - + attr <device> colEvent <n> +

                                + Die Feldlänge für das DB-Feld EVENT wird userspezifisch angepasst. Mit dem Attribut kann der Default-Wert im Modul - verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colEvent=0 wird das Datenbankfeld + verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colEvent=0 wird das Datenbankfeld EVENT nicht gefüllt.
                                - Hinweis:
                                + Hinweis:
                                Mit gesetztem Attribut gelten alle Feldlängenbegrenzungen auch für SQLite DB wie im Internal COLUMNS angezeigt !

                            - +
                              - +
                            • colReading
                                - attr <device> colReading <n> -
                                - + attr <device> colReading <n> +

                                + Die Feldlänge für das DB-Feld READING wird userspezifisch angepasst. Mit dem Attribut kann der Default-Wert im Modul - verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colReading=0 wird das Datenbankfeld + verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colReading=0 wird das Datenbankfeld READING nicht gefüllt.
                                Hinweis:
                                Mit gesetztem Attribut gelten alle Feldlängenbegrenzungen auch für SQLite DB wie im Internal COLUMNS angezeigt !
                                @@ -8986,17 +9214,17 @@ attr SMA_Energymeter DbLogValueFn

                              - +
                                - +
                              • colValue
                                  - attr <device> colValue <n> -
                                  - + attr <device> colValue <n> +

                                  + Die Feldlänge für das DB-Feld VALUE wird userspezifisch angepasst. Mit dem Attribut kann der Default-Wert im Modul - verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colValue=0 wird das Datenbankfeld + verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colValue=0 wird das Datenbankfeld VALUE nicht gefüllt.
                                  Hinweis:
                                  Mit gesetztem Attribut gelten alle Feldlängenbegrenzungen auch für SQLite DB wie im Internal COLUMNS angezeigt !
                                  @@ -9004,72 +9232,72 @@ attr SMA_Energymeter DbLogValueFn

                                - +
                                  - +
                                • commitMode
                                    attr <device> commitMode [basic_ta:on | basic_ta:off | ac:on_ta:on | ac:on_ta:off | ac:off_ta:on] -
                                    - - Ändert die Verwendung der Datenbank Autocommit- und/oder Transaktionsfunktionen. +

                                    + + Ändert die Verwendung der Datenbank Autocommit- und/oder Transaktionsfunktionen. Wird Transaktion "aus" verwendet, werden im asynchronen Modus nicht gespeicherte Datensätze nicht an den Cache zurück - gegeben. + gegeben. Dieses Attribut ist ein advanced feature und sollte nur im konkreten Bedarfs- bzw. Supportfall geändert werden.

                                    - +
                                    • basic_ta:on - Autocommit Servereinstellung / Transaktion ein (default)
                                    • basic_ta:off - Autocommit Servereinstellung / Transaktion aus
                                    • ac:on_ta:on - Autocommit ein / Transaktion ein
                                    • ac:on_ta:off - Autocommit ein / Transaktion aus
                                    • ac:off_ta:on - Autocommit aus / Transaktion ein (Autocommit "aus" impliziert Transaktion "ein")
                                    • -
                                    +

                              - +
                                - +
                              • convertTimezone
                                  - attr <device> convertTimezone [UTC | none] -
                                  - + attr <device> convertTimezone [UTC | none] +

                                  + UTC - der lokale Timestamp des Events wird nach UTC konvertiert.
                                  (default: none)

                                  - + Hinweis:
                                  Die Perl-Module 'DateTime' und 'DateTime::Format::Strptime' müssen installiert sein !

                              - +
                                - +
                              • DbLogType
                                  attr <device> DbLogType [Current|History|Current/History|SampleFill/History] -
                                  - +

                                  + Dieses Attribut legt fest, welche Tabelle oder Tabellen in der Datenbank genutzt werden sollen. Ist dieses Attribut nicht gesetzt, wird per default die Einstellung history verwendet.

                                  - + Bedeutung der Einstellungen sind:

                                  - +
                                    - +
                                    - - - @@ -9077,88 +9305,142 @@ attr SMA_Energymeter DbLogValueFn

                                    - + Hinweis:
                                    - Die Current-Tabelle muß genutzt werden um eine Device:Reading-DropDownliste zur Erstellung eines + Die Current-Tabelle muß genutzt werden um eine Device:Reading-DropDownliste zur Erstellung eines SVG-Plots zu erhalten.

                                    - +
                                      - +
                                    • DbLogSelectionMode
                                        attr <device> DbLogSelectionMode [Exclude|Include|Exclude/Include] -
                                        - - Dieses fuer DbLog-Devices spezifische Attribut beeinflußt, wie die Device-spezifischen Attribute - DbLogExclude und DbLogInclude (s.u.) ausgewertet werden.
                                        - Fehlt dieses Attribut, wird "Exclude" als Default angenommen.
                                        - +

                                        + + Dieses für DbLog-Devices spezifische Attribut beeinflußt, wie die Device-spezifischen Attribute + DbLogExclude und DbLogInclude + ausgewertet werden. DbLogExclude und DbLogInclude werden in den Quellen-Devices gesetzt.
                                        + Ist das Attribut DbLogSelectionMode nicht gesetzt, ist "Exclude" der Default. +

                                        +
                                          -
                                        • Exclude: DbLog verhaelt sich wie bisher auch, alles was ueber die RegExp im DEF angegeben ist, wird geloggt, bis auf das, - was ueber die RegExp in DbLogExclude ausgeschlossen wird.
                                          - Das Attribut DbLogInclude wird in diesem Fall nicht beruecksichtigt
                                        • -
                                        • Include: Es wird nur das geloggt was ueber die RegExp in DbLogInclude (im Quelldevice) eingeschlossen wird.
                                          - Das Attribut DbLogExclude wird in diesem Fall ebenso wenig beruecksichtigt wie die Regex im DEF. Auch - der Devicename (des Quelldevice) geht in die Auswertung nicht mit ein.
                                        • -
                                        • Exclude/Include: Funktioniert im Wesentlichen wie "Exclude", nur das sowohl DbLogExclude als auch DbLogInclude - geprueft werden. Readings die durch DbLogExclude zwar ausgeschlossen wurden, mit DbLogInclude aber wiederum eingeschlossen werden, - werden somit dennoch geloggt.
                                        • +
                                        • Exclude: Readings werden geloggt wenn sie auf den im DEF angegebenen Regex matchen. Ausgeschlossen werden + die Readings, die auf den Regex im Attribut DbLogExclude matchen.
                                          + Das Attribut DbLogInclude wird in diesem Fall nicht berücksichtigt. +
                                        • +
                                          +
                                        • Include: Es werden nur Readings geloggt welche über den Regex im Attribut DbLogInclude + eingeschlossen werden.
                                          + Das Attribut DbLogExclude wird in diesem Fall ebenso wenig berücksichtigt wie der Regex im DEF. +
                                        • +
                                          +
                                        • Exclude/Include: Funktioniert im Wesentlichen wie "Exclude", nur dass sowohl das Attribut DbLogExclude + als auch das Attribut DbLogInclude geprüft wird. + Readings die durch DbLogExclude zwar ausgeschlossen wurden, mit DbLogInclude aber + wiederum eingeschlossen werden, werden somit dennoch beim Logging berücksichtigt. +

                                    - +
                                      - +
                                    • DbLogInclude
                                        - attr <device> DbLogInclude regex:MinInterval[:force],[regex:MinInterval[:force]] ... -
                                        - - Wird DbLog genutzt, wird in allen Devices das Attribut DbLogInclude propagiert. - DbLogInclude funktioniert im Endeffekt genau wie DbLogExclude, ausser dass Readings mit diesen RegExp - in das Logging eingeschlossen statt ausgeschlossen werden koennen.
                                        - Ist MinIntervall angegeben, wird der Logeintrag nicht geloggt, wenn das Intervall noch nicht erreicht und der Wert - des Readings sich nicht verändert hat. - Ist der optionale Parameter "force" hinzugefügt, wird der Logeintrag auch dann nicht geloggt, wenn sich der - Wert des Readings verändert hat.
                                        - Siehe auch die DbLog Attribute defaultMinInterval und DbLogSelectionMode. - Es beeinflußt wie DbLogExclude und DbLogInclude ausgewertet werden.

                                        + attr DbLogInclude Regex[:MinInterval][:force],[Regex[:MinInterval][:force]], ... +

                                        - Beispiel
                                        + Mit dem Attribut DbLogInclude werden die Readings definiert, die in der Datenbank gespeichert werden sollen.
                                        + Die Definition der zu speichernden Readings erfolgt über einen regulären Ausdruck und alle Readings, die mit dem + regulären Ausdruck matchen, werden in der Datenbank gespeichert.
                                        + + Der optionale Zusatz <MinInterval> gibt an, dass ein Wert dann gespeichert wird wenn mindestens <MinInterval> + Sekunden seit der letzten Speicherung vergangen sind.
                                        + + Unabhängig vom Ablauf des Intervalls wird das Reading gespeichert wenn sich der Wert des Readings verändert hat.
                                        + Mit dem optionalen Modifier "force" kann erzwungen werden das angegebene Intervall <MinInterval> einzuhalten auch + wenn sich der Wert des Readings seit der letzten Speicherung verändert hat. +

                                        + +
                                          +
                                          +        | Modifier |         innerhalb Intervall          | außerhalb Intervall |
                                          +        |          | Wert gleich        | Wert geändert   |                     |
                                          +        |----------+--------------------+-----------------+---------------------|
                                          +        | <none>   | ignorieren         | speichern       | speichern           |
                                          +        | force    | ignorieren         | ignorieren      | speichern           |
                                          +      
                                          +
                                        + +
                                        + Hinweise:
                                        + Das Attribut DbLogInclude wird in allen Devices propagiert wenn DbLog verwendet wird.
                                        + Das Attribut DbLogSelectionMode muss entsprechend gesetzt sein + um DbLogInclude zu aktivieren.
                                        + Mit dem Attribut defaultMinInterval kann ein Default für + <MinInterval> vorgegeben werden. +

                                        + + Beispiele:
                                        attr MyDevice1 DbLogInclude .*
                                        attr MyDevice2 DbLogInclude state,(floorplantext|MyUserReading):300,battery:3600
                                        attr MyDevice2 DbLogInclude state,(floorplantext|MyUserReading):300:force,battery:3600:force
                                      -
                                    • + +

                                    - +
                                      - +
                                    • DbLogExclude
                                        - attr <device> DbLogExclude regex:MinInterval[:force],[regex:MinInterval[:force]] ... -
                                        - - Wird DbLog genutzt, wird in allen Devices das Attribut DbLogExclude propagiert. - Der Wert des Attributes wird als Regexp ausgewertet und schliesst die damit matchenden Readings von einem Logging aus. - Einzelne Regexp werden durch Komma getrennt.
                                        - Ist MinIntervall angegeben, wird der Logeintrag nicht geloggt, wenn das Intervall noch nicht erreicht und der - Wert des Readings sich nicht verändert hat. - Ist der optionale Parameter "force" hinzugefügt, wird der Logeintrag auch dann nicht geloggt, wenn sich der - Wert des Readings verändert hat.
                                        - Siehe auch die DbLog Attribute defaultMinInterval und DbLogSelectionMode. - Es beeinflußt wie DbLogExclude und DbLogInclude ausgewertet werden.

                                        - + attr <device> DbLogExclude regex[:MinInterval][:force],[regex[:MinInterval][:force]] ... +

                                        + + Mit dem Attribut DbLogExclude werden die Readings definiert, die nicht in der Datenbank gespeichert werden + sollen.
                                        + Die Definition der auszuschließenden Readings erfolgt über einen regulären Ausdruck und alle Readings, die mit dem + regulären Ausdruck matchen, werden vom Logging in die Datenbank ausgeschlossen.
                                        + + Readings, die nicht über den Regex ausgeschlossen wurden, werden in der Datenbank geloggt. Das Verhalten der + Speicherung wird mit den nachfolgenden optionalen Angaben gesteuert.
                                        + Der optionale Zusatz <MinInterval> gibt an, dass ein Wert dann gespeichert wird wenn mindestens <MinInterval> + Sekunden seit der letzten Speicherung vergangen sind.
                                        + + Unabhängig vom Ablauf des Intervalls wird das Reading gespeichert wenn sich der Wert des Readings verändert hat.
                                        + Mit dem optionalen Modifier "force" kann erzwungen werden das angegebene Intervall <MinInterval> einzuhalten auch + wenn sich der Wert des Readings seit der letzten Speicherung verändert hat. +

                                        + +
                                          +
                                          +        | Modifier |         innerhalb Intervall          | außerhalb Intervall |
                                          +        |          | Wert gleich        | Wert geändert   |                     |
                                          +        |----------+--------------------+-----------------+---------------------|
                                          +        | <none>   | ignorieren         | speichern       | speichern           |
                                          +        | force    | ignorieren         | ignorieren      | speichern           |
                                          +      
                                          +
                                        + +
                                        + Hinweise:
                                        + Das Attribut DbLogExclude wird in allen Devices propagiert wenn DbLog verwendet wird.
                                        + Das Attribut DbLogSelectionMode kann entsprechend gesetzt werden + um DbLogExclude zu deaktivieren.
                                        + Mit dem Attribut defaultMinInterval kann ein Default für + <MinInterval> vorgegeben werden. +

                                        + Beispiel
                                        attr MyDevice1 DbLogExclude .*
                                        attr MyDevice2 DbLogExclude state,(floorplantext|MyUserReading):300,battery:3600
                                        @@ -9167,32 +9449,32 @@ attr SMA_Energymeter DbLogValueFn

                                      - +
                                        - +
                                      • DbLogValueFn
                                          attr <device> DbLogValueFn {} -
                                          - +

                                          + Wird DbLog genutzt, wird in allen Devices das Attribut DbLogValueFn propagiert. - Es kann über einen Perl-Ausdruck auf die Variablen $TIMESTAMP, $READING, $VALUE (Wert des Readings) und + Es kann über einen Perl-Ausdruck auf die Variablen $TIMESTAMP, $READING, $VALUE (Wert des Readings) und $UNIT (Einheit des Readingswert) zugegriffen werden und diese verändern, d.h. die veränderten Werte werden geloggt.
                                          - Außerdem hat man Lesezugriff auf $DEVICE (den Namen des Quellgeräts), $EVENT, $LASTTIMESTAMP und $LASTVALUE + Außerdem hat man Lesezugriff auf $DEVICE (den Namen des Quellgeräts), $EVENT, $LASTTIMESTAMP und $LASTVALUE zur Bewertung in Ihrem Ausdruck.
                                          Die Variablen $LASTTIMESTAMP und $LASTVALUE enthalten Zeit und Wert des zuletzt protokollierten Datensatzes von $DEVICE / $READING.
                                          - Soll $TIMESTAMP verändert werden, muss die Form "yyyy-mm-dd hh:mm:ss" eingehalten werden, ansonsten wird der + Soll $TIMESTAMP verändert werden, muss die Form "yyyy-mm-dd hh:mm:ss" eingehalten werden, ansonsten wird der geänderte $timestamp nicht übernommen. - Zusätzlich kann durch Setzen der Variable "$IGNORE=1" der Datensatz vom Logging ausgeschlossen werden.
                                          - Die devicespezifische Funktion in "DbLogValueFn" wird vor der eventuell im DbLog-Device vorhandenen Funktion im Attribut + Zusätzlich kann durch Setzen der Variable "$IGNORE=1" der Datensatz vom Logging ausgeschlossen werden.
                                          + Die devicespezifische Funktion in "DbLogValueFn" wird vor der eventuell im DbLog-Device vorhandenen Funktion im Attribut "valueFn" auf den Datensatz angewendet.

                                          - + Beispiel
                                           attr SMA_Energymeter DbLogValueFn
                                          -{ 
                                          +{
                                             if ($READING eq "Bezug_WirkP_Kosten_Diff"){
                                               $UNIT="Diff-W";
                                             }
                                          @@ -9204,83 +9486,83 @@ attr SMA_Energymeter DbLogValueFn
                                                
                                      - +
                                        - +
                                      • dbSchema
                                          attr <device> dbSchema <schema> -
                                          - - Dieses Attribut ist setzbar für die Datenbanken MySQL/MariaDB und PostgreSQL. Die Tabellennamen (current/history) werden +

                                          + + Dieses Attribut ist setzbar für die Datenbanken MySQL/MariaDB und PostgreSQL. Die Tabellennamen (current/history) werden durch das angegebene Datenbankschema ergänzt. Das Attribut ist ein advanced Feature und normalerweise nicht nötig zu setzen.

                                      - +
                                        - +
                                      • defaultMinInterval
                                          attr <device> defaultMinInterval <devspec>::<MinInterval>[::force],[<devspec>::<MinInterval>[::force]] ... -
                                          - +

                                          + Mit diesem Attribut wird ein Standard Minimum Intervall für devspec festgelegt. - Ist defaultMinInterval angegeben, wird der Logeintrag nicht geloggt, wenn das Intervall noch nicht erreicht und der + Ist defaultMinInterval angegeben, wird der Logeintrag nicht geloggt, wenn das Intervall noch nicht erreicht und der Wert des Readings sich nicht verändert hat.
                                          - Ist der optionale Parameter "force" hinzugefügt, wird der Logeintrag auch dann nicht geloggt, wenn sich der + Ist der optionale Parameter "force" hinzugefügt, wird der Logeintrag auch dann nicht geloggt, wenn sich der Wert des Readings verändert hat.
                                          - Eventuell im Quelldevice angegebene Spezifikationen DbLogExclude / DbLogInclude haben Vorrang und werden durch + Eventuell im Quelldevice angegebene Spezifikationen DbLogExclude / DbLogInclude haben Vorrang und werden durch defaultMinInterval nicht überschrieben.
                                          Die Eingabe kann mehrzeilig erfolgen.

                                          Beispiele
                                          attr dblog defaultMinInterval .*::120::force
                                          - # Events aller Devices werden nur geloggt, wenn 120 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) unabhängig von einer eventuellen Änderung des Wertes.
                                          + # Events aller Devices werden nur geloggt, wenn 120 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) unabhängig von einer eventuellen Änderung des Wertes.
                                          attr dblog defaultMinInterval (Weather|SMA)::300
                                          - # Events der Devices "Weather" und "SMA" werden nur geloggt wenn 300 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) und sich der Wert nicht geändert hat.
                                          + # Events der Devices "Weather" und "SMA" werden nur geloggt wenn 300 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) und sich der Wert nicht geändert hat.
                                          attr dblog defaultMinInterval TYPE=CUL_HM::600::force
                                          # Events aller Devices des Typs "CUL_HM" werden nur geloggt, wenn 600 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) unabhängig von einer eventuellen Änderung des Wertes.

                                      - +
                                        - +
                                      • disable
                                          attr <device> disable [0|1] -
                                          - - Das DbLog Device wird disabled (1) bzw. enabled (0). +

                                          + + Das DbLog Device wird disabled (1) bzw. enabled (0).

                                      - +
                                        - +
                                      • excludeDevs
                                          - attr <device> excludeDevs <devspec1>[#Reading],<devspec2>[#Reading],<devspec...> -
                                          - - Die Device/Reading-Kombinationen "devspec1#Reading", "devspec2#Reading" bis "devspec..." werden vom Logging in die + attr <device> excludeDevs <devspec1>[#Reading],<devspec2>[#Reading],<devspec...> +

                                          + + Die Device/Reading-Kombinationen "devspec1#Reading", "devspec2#Reading" bis "devspec..." werden vom Logging in die Datenbank global ausgeschlossen.
                                          Die Angabe eines auszuschließenden Readings ist optional.
                                          - Somit können Device/Readings explizit bzw. konsequent vom Logging ausgeschlossen werden ohne Berücksichtigung anderer + Somit können Device/Readings explizit bzw. konsequent vom Logging ausgeschlossen werden ohne Berücksichtigung anderer Excludes oder Includes (z.B. im DEF). - Die auszuschließenden Devices können als Geräte-Spezifikation angegeben werden. + Die auszuschließenden Devices können als Geräte-Spezifikation angegeben werden. Für weitere Details bezüglich devspec siehe Geräte-Spezifikation.

                                          - + Beispiel
                                          attr <device> excludeDevs global,Log.*,Cam.*,TYPE=DbLog @@ -9301,17 +9583,18 @@ attr SMA_Energymeter DbLogValueFn
                                            - +
                                          • expimpdir
                                              - attr <device> expimpdir <directory> -
                                              - - In diesem Verzeichnis wird das Cachefile beim Export angelegt bzw. beim Import gesucht. Siehe set-Kommandos - "exportCache" bzw. "importCachefile". Das Default-Verzeichnis ist "(global->modpath)/log/". + attr <device> expimpdir <directory> +


                                              + + In diesem Verzeichnis wird das Cachefile beim Export angelegt bzw. beim Import gesucht. Siehe set-Kommandos + exportCache bzw. importCachefile. + Das Default-Verzeichnis ist "(global->modpath)/log/". Das im Attribut angegebene Verzeichnis muss vorhanden und beschreibbar sein.

                                              - + Beispiel
                                              attr <device> expimpdir /opt/fhem/cache/ @@ -9320,15 +9603,15 @@ attr SMA_Energymeter DbLogValueFn

                                            - +
                                              - +
                                            • exportCacheAppend
                                                attr <device> exportCacheAppend [1|0] -
                                                - +


                                                + Wenn gesetzt, wird beim Export des Cache ("set <device> exportCache") der Cacheinhalt an das neueste bereits vorhandene Exportfile angehängt. Ist noch kein Exportfile vorhanden, wird es neu angelegt.
                                                Ist das Attribut nicht gesetzt, wird bei jedem Exportvorgang ein neues Exportfile angelegt. (default)
                                                @@ -9336,69 +9619,69 @@ attr SMA_Energymeter DbLogValueFn

                                              - +
                                                - +
                                              • noNotifyDev
                                                  attr <device> noNotifyDev [1|0] -
                                                  - +

                                                  + Erzwingt dass NOTIFYDEV nicht gesetzt und somit nicht verwendet wird.

                                              - +
                                                - +
                                              • noSupportPK
                                                  attr <device> noSupportPK [1|0] -
                                                  - +

                                                  + Deaktiviert die programmtechnische Unterstützung eines gesetzten Primary Key durch das Modul.

                                              - +
                                                - +
                                              • showproctime
                                                  attr <device> showproctime [1|0] -
                                                  - +

                                                  + Wenn gesetzt, zeigt das Reading "sql_processing_time" die benötigte Abarbeitungszeit (in Sekunden) für die SQL-Ausführung der durchgeführten Funktion. Dabei wird nicht ein einzelnes SQL-Statement, sondern die Summe aller notwendigen SQL-Abfragen innerhalb der jeweiligen Funktion betrachtet. Das Reading "background_processing_time" zeigt die im Kindprozess BlockingCall verbrauchte Zeit.
                                                  - +

                                              - +
                                                - +
                                              • showNotifyTime
                                                  attr <device> showNotifyTime [1|0] -
                                                  - - Wenn gesetzt, zeigt das Reading "notify_processing_time" die benötigte Abarbeitungszeit (in Sekunden) für die +

                                                  + + Wenn gesetzt, zeigt das Reading "notify_processing_time" die benötigte Abarbeitungszeit (in Sekunden) für die Abarbeitung der DbLog Notify-Funktion. Das Attribut ist für Performance Analysen geeignet und hilft auch die Unterschiede im Zeitbedarf bei der Umschaltung des synchronen in den asynchronen Modus festzustellen.
                                                  - +

                                              - +
                                                - +
                                              • SQLiteCacheSize
                                                  @@ -9414,12 +9697,12 @@ attr SMA_Energymeter DbLogValueFn
                                                    - +
                                                  • SQLiteJournalMode
                                                      attr <device> SQLiteJournalMode [WAL|off] -
                                                      +


                                                      Moderne SQLite Datenbanken werden mit einem Write-Ahead-Log (WAL) geöffnet, was optimale Datenintegrität und gute Performance gewährleistet.
                                                      @@ -9434,86 +9717,87 @@ attr SMA_Energymeter DbLogValueFn
                                                        - +
                                                      • syncEvents
                                                          attr <device> syncEvents [1|0] -
                                                          - +

                                                          + es werden Events für Reading NextSync erzeugt.

                                                      - +
                                                        - +
                                                      • syncInterval
                                                          attr <device> syncInterval <n> -
                                                          - +

                                                          + Wenn DbLog im asynchronen Modus betrieben wird (Attribut asyncMode=1), wird mit diesem Attribut das Intervall in Sekunden zur Speicherung der im Speicher gecachten Events in die Datenbank eingestellt. Der Defaultwert ist 30 Sekunden.
                                                          - +

                                                      - +
                                                        - +
                                                      • suppressAddLogV3
                                                          attr <device> suppressAddLogV3 [1|0] -
                                                          - +

                                                          + Wenn gesetzt werden verbose 3 Logeinträge durch die addLog-Funktion unterdrückt.

                                                      - +
                                                        - +
                                                      • suppressUndef
                                                          attr <device> suppressUndef -
                                                          - Unterdrueckt alle undef Werte die durch eine Get-Anfrage zb. Plot aus der Datenbank selektiert werden

                                                          +

                                                          + + Unterdrückt alle undef Werte die durch eine Get-Anfrage, z.B. Plot, aus der Datenbank selektiert werden. - Beispiel
                                                          - #DbLog eMeter:power:::$val=($val>1500)?undef:$val

                                                      - +
                                                        - +
                                                      • timeout
                                                          attr <device> timeout <n> -
                                                          +

                                                          + Setzt den Timeout-Wert für den Schreibzyklus in die Datenbank im asynchronen Modus (default 86400s).

                                                      - +
                                                        - +
                                                      • traceFlag
                                                          attr <device> traceFlag <ALL|SQL|CON|ENC|DBD|TXN> -
                                                          - Bestimmt das Tracing von bestimmten Aktivitäten innerhalb des Datenbankinterfaces und Treibers. Das Attribut ist nur +

                                                          + + Bestimmt das Tracing von bestimmten Aktivitäten innerhalb des Datenbankinterfaces und Treibers. Das Attribut ist nur für den Fehler- bzw. Supportfall gedacht.

                                                          - +
                                                            -
                                    Current Events werden nur in die current-Tabelle geloggt. +
                                    Current Events werden nur in die current-Tabelle geloggt. Die current-Tabelle wird bei der SVG-Erstellung ausgewertet.
                                    History Events werden nur in die history-Tabelle geloggt. Es wird keine DropDown-Liste mit Vorschlägen bei der SVG-Erstellung erzeugt.
                                    Current/History Events werden sowohl in die current- als auch in die history Tabelle geloggt. +
                                    Current/History Events werden sowohl in die current- als auch in die history Tabelle geloggt. Die current-Tabelle wird bei der SVG-Erstellung ausgewertet.
                                    SampleFill/History Events werden nur in die history-Tabelle geloggt. Die current-Tabelle wird bei der SVG-Erstellung ausgewertet und +
                                    SampleFill/History Events werden nur in die history-Tabelle geloggt. Die current-Tabelle wird bei der SVG-Erstellung ausgewertet und kann zur Erzeugung einer DropDown-Liste mittels einem DbRep-Device
                                    "set <DbRep-Name> tableCurrentFillup" mit einem einstellbaren Extract der history-Tabelle gefüllt werden (advanced Feature).
                                    +
                                    @@ -9525,66 +9809,68 @@ attr SMA_Energymeter DbLogValueFn
                                    ALL schaltet alle DBI- und Treiberflags an.
                                    SQL verfolgt die SQL Statement Ausführung. (Default)

                                  - +

                              - +
                                - +
                              • traceHandles
                                  attr <device> traceHandles <n> -
                                  - +

                                  + Wenn gesetzt, werden alle <n> Sekunden die systemweit vorhandenen Datenbank-Handles im Logfile ausgegeben. Dieses Attribut ist nur für Supportzwecke relevant. (Default: 0 = ausgeschaltet)
                                  - +

                              - +
                                - +
                              • traceLevel
                                  attr <device> traceLevel <0|1|2|3|4|5|6|7> -
                                  +

                                  + Schaltet die Trace-Funktion des Moduls ein.
                                  - Achtung ! Das Attribut ist nur für den Fehler- bzw. Supportfall gedacht. Es werden sehr viele Einträge in + Achtung ! Das Attribut ist nur für den Fehler- bzw. Supportfall gedacht. Es werden sehr viele Einträge in das FHEM Logfile vorgenommen !

                                  - +
                                    - +
                                    - -
                                    0 Tracing ist disabled. (Default)
                                    1 Tracing von DBI Top-Level Methoden mit deren Ergebnissen und Fehlern
                                    2 Wie oben. Zusätzlich Top-Level Methodeneinträge mit Parametern.
                                    3 Wie oben. Zusätzlich werden einige High-Level Informationen des Treibers und +
                                    3 Wie oben. Zusätzlich werden einige High-Level Informationen des Treibers und einige interne Informationen des DBI hinzugefügt.
                                    4 Wie oben. Zusätzlich werden mehr detaillierte Informationen des Treibers +
                                    4 Wie oben. Zusätzlich werden mehr detaillierte Informationen des Treibers eingefügt.
                                    5-7 Wie oben, aber mit mehr und mehr internen Informationen.

                                  - +

                              - +
                                - +
                              • useCharfilter
                                  attr <device> useCharfilter [0|1]

                                  Wenn gesetzt, werden nur ASCII Zeichen von 32 bis 126 im Event akzeptiert. (default: 0)
                                  Das sind die Zeichen " A-Za-z0-9!"#$%&'()*+,-.\/:;<=>?@[\\]^_`{|}~".
                                  Umlaute und "€" werden umgesetzt (z.B. ä nach ae, € nach EUR).
                                  • valueFn
                                      attr <device> valueFn {}

                                      Es kann über einen Perl-Ausdruck auf die Variablen $TIMESTAMP, $DEVICE, $DEVICETYPE, $READING,
                                      $VALUE (Wert des Readings) und $UNIT (Einheit des Readingswertes) zugegriffen werden und diese
                                      verändern, d.h. die veränderten Werte werden geloggt.
                                      Außerdem hat man Lesezugriff auf $EVENT, $LASTTIMESTAMP und $LASTVALUE zur Bewertung im Ausdruck.
                                      Die Variablen $LASTTIMESTAMP und $LASTVALUE enthalten Zeit und Wert des zuletzt protokollierten
                                      Datensatzes von $DEVICE / $READING.
                                      Soll $TIMESTAMP verändert werden, muss die Form "yyyy-mm-dd hh:mm:ss" eingehalten werden.
                                      Anderenfalls wird der geänderte $timestamp nicht übernommen.
                                      Zusätzlich kann durch Setzen der Variable "$IGNORE=1" ein Datensatz vom Logging ausgeschlossen werden.

                                      Beispiele
                                      attr <device> valueFn {if ($DEVICE eq "living_Clima" && $VALUE eq "off" ){$VALUE=0;} elsif ($DEVICE eq "e-power"){$VALUE= sprintf "%.1f", $VALUE;}}

                                  • verbose4Devs
                                      attr <device> verbose4Devs <device1>,<device2>,<device..>

                                      Mit verbose Level 4 werden nur Ausgaben bezüglich der in diesem Attribut aufgeführten Devices
                                      im Logfile protokolliert. Ohne dieses Attribut werden mit verbose 4 Ausgaben aller relevanten
                                      Devices im Logfile protokolliert. Die angegebenen Devices werden als Regex ausgewertet.

                                      Beispiel
                                      attr <device> verbose4Devs sys.*,.*5000.*,Cam.*,global

                                    =end html_DE @@ -9691,7 +9977,7 @@ attr SMA_Energymeter DbLogValueFn "Time::HiRes": 0, "Time::Local": 0, "HttpUtils": 0, - "Encode": 0 + "Encode": 0 }, "recommends": { "FHEM::Meta": 0, @@ -9719,7 +10005,7 @@ attr SMA_Energymeter DbLogValueFn "x_branch": "dev", "x_filepath": "fhem/contrib/", "x_raw": "https://svn.fhem.de/fhem/trunk/fhem/contrib/DS_Starter/93_DbLog.pm" - } + } } } }