From 0cbc22edee5d33dd41993a127ef6dd8ad8435812 Mon Sep 17 00:00:00 2001 From: nasseeder1 Date: Sat, 23 May 2020 08:54:36 +0000 Subject: [PATCH] 93_DbLog: contrib 4.10.0 git-svn-id: https://svn.fhem.de/fhem/trunk@22012 2b470e98-0d58-463d-a4d8-8e2adae1ed80 --- fhem/contrib/DS_Starter/93_DbLog.pm | 3404 ++++++++++++++------------- 1 file changed, 1746 insertions(+), 1658 deletions(-) diff --git a/fhem/contrib/DS_Starter/93_DbLog.pm b/fhem/contrib/DS_Starter/93_DbLog.pm index 6dbe1c368..226b00641 100644 --- a/fhem/contrib/DS_Starter/93_DbLog.pm +++ b/fhem/contrib/DS_Starter/93_DbLog.pm @@ -1,5 +1,5 @@ ############################################################################################################################################ -# $Id: 93_DbLog.pm 21051 2020-01-25 20:53:06Z DS_Starter $ +# $Id: 93_DbLog.pm 21801 2020-04-29 09:37:37Z DS_Starter $ # # 93_DbLog.pm # written by Dr. Boris Neubert 2007-12-30 @@ -30,6 +30,10 @@ no if $] >= 5.017011, warnings => 'experimental::smartmatch'; # Version History intern by DS_Starter: our %DbLog_vNotesIntern = ( + "4.10.0" => "22.05.2020 improve configCheck, new vars \$LASTTIMESTAMP and \$LASTVALUE in valueFn / DbLogValueFn, Forum:#111423 ", + "4.9.13" => "12.05.2020 commandRef changed, AutoInactiveDestroy => 1 for dbh ", + "4.9.12" => "28.04.2020 fix line breaks in set function, Forum: #110673 ", + "4.9.11" => "22.03.2020 logfile entry if DBI module not installed, Forum: #109382 ", "4.9.10" => "31.01.2020 fix warning, Forum: #107950 ", "4.9.9" => "21.01.2020 default ParseEvent changed again, Forum: #106769 ", "4.9.8" => "17.01.2020 adjust configCheck with plotEmbed check. Forum: #107383 ", @@ -238,7 +242,7 @@ my %columns = ("DEVICE" => 64, "VALUE" => 128, "UNIT" => 32 ); - + sub DbLog_dbReadings($@); sub DbLog_showChildHandles($$$$); @@ -255,38 +259,38 @@ sub DbLog_Initialize($) { $hash->{SVG_regexpFn} = "DbLog_regexpFn"; $hash->{DelayedShutdownFn} = "DbLog_DelayedShutdown"; $hash->{AttrList} = "addStateEvent:0,1 ". 
- "asyncMode:1,0 ". + "asyncMode:1,0 ". "bulkInsert:1,0 ". "commitMode:basic_ta:on,basic_ta:off,ac:on_ta:on,ac:on_ta:off,ac:off_ta:on ". - "cacheEvents:2,1,0 ". - "cacheLimit ". + "cacheEvents:2,1,0 ". + "cacheLimit ". "colEvent ". "colReading ". - "colValue ". + "colValue ". "DbLogSelectionMode:Exclude,Include,Exclude/Include ". "DbLogType:Current,History,Current/History,SampleFill/History ". "dbSchema ". "defaultMinInterval:textField-long ". "disable:1,0 ". - "excludeDevs ". - "expimpdir ". + "excludeDevs ". + "expimpdir ". "exportCacheAppend:1,0 ". - "noSupportPK:1,0 ". - "noNotifyDev:1,0 ". - "showproctime:1,0 ". - "suppressAddLogV3:1,0 ". + "noSupportPK:1,0 ". + "noNotifyDev:1,0 ". + "showproctime:1,0 ". + "suppressAddLogV3:1,0 ". "suppressUndef:0,1 ". - "syncEvents:1,0 ". - "syncInterval ". - "showNotifyTime:1,0 ". + "syncEvents:1,0 ". + "syncInterval ". + "showNotifyTime:1,0 ". "traceFlag:SQL,CON,ENC,DBD,TXN,ALL ". "traceLevel:0,1,2,3,4,5,6,7 ". "traceHandles ". - "timeout ". - "useCharfilter:0,1 ". - "valueFn:textField-long ". - "verbose4Devs ". - $readingFnAttributes; + "timeout ". + "useCharfilter:0,1 ". + "valueFn:textField-long ". + "verbose4Devs ". + $readingFnAttributes; addToAttrList("DbLogInclude"); addToAttrList("DbLogExclude"); @@ -303,15 +307,17 @@ return; ############################################################### sub DbLog_Define($@) { my ($hash, $def) = @_; - my @a = split("[ \t][ \t]*", $def); + my $name = $hash->{NAME}; + my @a = split("[ \t][ \t]*", $def); - return "Error: Perl module ".$DbLogMMDBI." is missing. - Install it on Debian with: sudo apt-get install libdbi-perl" if($DbLogMMDBI); + if($DbLogMMDBI) { + Log3($name, 1, "DbLog $name - ERROR - Perl module ".$DbLogMMDBI." is missing. DbLog module is not loaded ! On Debian systems you can install it with \"sudo apt-get install libdbi-perl\" "); + return "Error: Perl module ".$DbLogMMDBI." is missing. 
Install it on Debian with: sudo apt-get install libdbi-perl"; + } return "wrong syntax: define DbLog configuration regexp" if(int(@a) != 4); - my $name = $hash->{NAME}; $hash->{CONFIGURATION} = $a[2]; my $regexp = $a[3]; @@ -341,7 +347,7 @@ sub DbLog_Define($@) { my $ret = DbLog_readCfg($hash); if ($ret) { # return on error while reading configuration - Log3($hash->{NAME}, 1, "DbLog $hash->{NAME} - Error while reading $hash->{CONFIGURATION}: '$ret' "); + Log3($name, 1, "DbLog $name - Error while reading $hash->{CONFIGURATION}: '$ret' "); return $ret; } @@ -411,21 +417,23 @@ sub DbLog_Attr(@) { unless ($aVal =~ /^[0-9]+$/) { return " The Value of $aName is not valid. Use only figures 0-9 !";} } - if ($hash->{MODEL} !~ /MYSQL|POSTGRESQL/ && $aName =~ /dbSchema/) { + if ($hash->{MODEL} !~ /MYSQL|POSTGRESQL/ && $aName =~ /dbSchema/) { return "\"$aName\" is not valid for database model \"$hash->{MODEL}\""; - } - + } + if( $aName eq 'valueFn' ) { - my %specials= ( - "%TIMESTAMP" => $name, - "%DEVICE" => $name, - "%DEVICETYPE" => $name, - "%EVENT" => $name, - "%READING" => $name, - "%VALUE" => $name, - "%UNIT" => $name, - "%IGNORE" => $name, - "%CN" => $name, + my %specials= ( + "%TIMESTAMP" => $name, + "%LASTTIMESTAMP" => $name, + "%DEVICE" => $name, + "%DEVICETYPE" => $name, + "%EVENT" => $name, + "%READING" => $name, + "%VALUE" => $name, + "%LASTVALUE" => $name, + "%UNIT" => $name, + "%IGNORE" => $name, + "%CN" => $name ); my $err = perlSyntaxCheck($aVal, %specials); return $err if($err); @@ -439,20 +447,20 @@ sub DbLog_Attr(@) { if($aName eq "colEvent" || $aName eq "colReading" || $aName eq "colValue") { if ($cmd eq "set" && $aVal) { unless ($aVal =~ /^[0-9]+$/) { return " The Value of $aName is not valid. 
Use only figures 0-9 !";} - } - InternalTimer(gettimeofday()+0.5, "DbLog_setinternalcols", $hash, 0); + } + InternalTimer(gettimeofday()+0.5, "DbLog_setinternalcols", $hash, 0); } if($aName eq "asyncMode") { if ($cmd eq "set" && $aVal) { $hash->{MODE} = "asynchronous"; - InternalTimer(gettimeofday()+2, "DbLog_execmemcache", $hash, 0); + InternalTimer(gettimeofday()+2, "DbLog_execmemcache", $hash, 0); } else { - $hash->{MODE} = "synchronous"; + $hash->{MODE} = "synchronous"; delete($defs{$name}{READINGS}{NextSync}); - delete($defs{$name}{READINGS}{CacheUsage}); - InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); - } + delete($defs{$name}{READINGS}{CacheUsage}); + InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); + } } if($aName eq "commitMode") { @@ -464,24 +472,24 @@ sub DbLog_Attr(@) { if($aName eq "showproctime") { if ($cmd ne "set" || !$aVal) { - delete($defs{$name}{READINGS}{background_processing_time}); - delete($defs{$name}{READINGS}{sql_processing_time}); - } + delete($defs{$name}{READINGS}{background_processing_time}); + delete($defs{$name}{READINGS}{sql_processing_time}); + } } if($aName eq "showNotifyTime") { if ($cmd ne "set" || !$aVal) { - delete($defs{$name}{READINGS}{notify_processing_time}); - } + delete($defs{$name}{READINGS}{notify_processing_time}); + } } if($aName eq "noNotifyDev") { my $regexp = $hash->{REGEXP}; if ($cmd eq "set" && $aVal) { - delete($hash->{NOTIFYDEV}); - } else { - notifyRegexpChanged($hash, $regexp); - } + delete($hash->{NOTIFYDEV}); + } else { + notifyRegexpChanged($hash, $regexp); + } } if ($aName eq "disable") { @@ -492,11 +500,11 @@ sub DbLog_Attr(@) { $do = 0 if($cmd eq "del"); my $val = ($do == 1 ? 
"disabled" : "active"); - # letzter CacheSync vor disablen - DbLog_execmemcache($hash) if($do == 1); - + # letzter CacheSync vor disablen + DbLog_execmemcache($hash) if($do == 1); + readingsSingleUpdate($hash, "state", $val, 1); - $hash->{HELPER}{OLDSTATE} = $val; + $hash->{HELPER}{OLDSTATE} = $val; if ($do == 0) { InternalTimer(gettimeofday()+2, "DbLog_execmemcache", $hash, 0) if($async); @@ -507,7 +515,7 @@ sub DbLog_Attr(@) { if ($aName eq "traceHandles") { if($cmd eq "set") { unless ($aVal =~ /^[0-9]+$/) {return " The Value of $aName is not valid. Use only figures 0-9 without decimal places !";} - } + } RemoveInternalTimer($hash, "DbLog_startShowChildhandles"); if($cmd eq "set") { $do = ($aVal) ? 1 : 0; @@ -540,70 +548,88 @@ return undef; ################################################################ sub DbLog_Set($@) { my ($hash, @a) = @_; - my $name = $hash->{NAME}; - my $async = AttrVal($name, "asyncMode", undef); - my $usage = "Unknown argument, choose one of reduceLog reduceLogNbl reopen rereadcfg:noArg count:noArg countNbl:noArg - deleteOldDays deleteOldDaysNbl userCommand clearReadings:noArg - eraseReadings:noArg addLog "; - $usage .= "listCache:noArg addCacheLine purgeCache:noArg commitCache:noArg exportCache:nopurge,purgecache " if (AttrVal($name, "asyncMode", undef)); - $usage .= "configCheck:noArg "; + my $name = $hash->{NAME}; + my $async = AttrVal($name, "asyncMode", undef); + my $usage = "Unknown argument, choose one of ". + "reduceLog ". + "reduceLogNbl ". + "reopen ". + "rereadcfg:noArg ". + "count:noArg ". + "configCheck:noArg ". + "countNbl:noArg ". + "deleteOldDays ". + "deleteOldDaysNbl ". + "userCommand ". + "clearReadings:noArg ". + "eraseReadings:noArg ". + "addLog " + ; + + $usage .= "listCache:noArg ". + "addCacheLine ". + "purgeCache:noArg ". + "commitCache:noArg ". 
+ "exportCache:nopurge,purgecache " + if (AttrVal($name, "asyncMode", undef)); + my $history = $hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; - my (@logs,$dir); - + my (@logs,$dir); + my $dirdef = AttrVal("global", "logdir", $attr{global}{modpath}."/log/"); $dir = AttrVal($name, "expimpdir", $dirdef); $dir = $dir."/" if($dir !~ /.*\/$/); - - opendir(DIR,$dir); - my $sd = "cache_".$name."_"; + + opendir(DIR,$dir); + my $sd = "cache_".$name."_"; while (my $file = readdir(DIR)) { next unless (-f "$dir/$file"); next unless ($file =~ /^$sd/); push @logs,$file; } closedir(DIR); - my $cj = join(",",reverse(sort @logs)) if (@logs); - - if (@logs) { - $usage .= "importCachefile:".$cj." "; - } else { - $usage .= "importCachefile "; - } - - return $usage if(int(@a) < 2); - my $dbh = $hash->{DBHP}; - my $db = (split(/;|=/, $hash->{dbconn}))[1]; - my $ret; + my $cj = join(",",reverse(sort @logs)) if (@logs); + + if (@logs) { + $usage .= "importCachefile:".$cj." "; + } else { + $usage .= "importCachefile "; + } + + return $usage if(int(@a) < 2); + my $dbh = $hash->{DBHP}; + my $db = (split(/;|=/, $hash->{dbconn}))[1]; + my $ret; if ($a[1] eq 'reduceLog') { my ($od,$nd) = split(":",$a[2]); # $od - Tage älter als , $nd - Tage neuer als if ($nd && $nd <= $od) {return "The second day value must be greater than the first one ! ";} - if (defined($a[3]) && $a[3] !~ /^average$|^average=.+|^EXCLUDE=.+$|^INCLUDE=.+$/i) { + if (defined($a[3]) && $a[3] !~ /^average$|^average=.+|^EXCLUDE=.+$|^INCLUDE=.+$/i) { return "ReduceLog syntax error in set command. 
Please see commandref for help."; } if (defined $a[2] && $a[2] =~ /(^\d+$)|(^\d+:\d+$)/) { $ret = DbLog_reduceLog($hash,@a); - InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); + InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); } else { Log3($name, 1, "DbLog $name: reduceLog error, no given."); $ret = "reduceLog error, no given."; } } - elsif ($a[1] eq 'reduceLogNbl') { + elsif ($a[1] eq 'reduceLogNbl') { my ($od,$nd) = split(":",$a[2]); # $od - Tage älter als , $nd - Tage neuer als if ($nd && $nd <= $od) {return "The second day value must be greater than the first one ! ";} - if (defined($a[3]) && $a[3] !~ /^average$|^average=.+|^EXCLUDE=.+$|^INCLUDE=.+$/i) { + if (defined($a[3]) && $a[3] !~ /^average$|^average=.+|^EXCLUDE=.+$|^INCLUDE=.+$/i) { return "ReduceLogNbl syntax error in set command. Please see commandref for help."; } if (defined $a[2] && $a[2] =~ /(^\d+$)|(^\d+:\d+$)/) { if ($hash->{HELPER}{REDUCELOG_PID} && $hash->{HELPER}{REDUCELOG_PID}{pid} !~ m/DEAD/) { $ret = "reduceLogNbl already in progress. 
Please wait until the running process is finished."; } else { - delete $hash->{HELPER}{REDUCELOG_PID}; - my @b = @a; - shift(@b); - readingsSingleUpdate($hash,"reduceLogState","@b started",1); + delete $hash->{HELPER}{REDUCELOG_PID}; + my @b = @a; + shift(@b); + readingsSingleUpdate($hash,"reduceLogState","@b started",1); $hash->{HELPER}{REDUCELOG} = \@a; $hash->{HELPER}{REDUCELOG_PID} = BlockingCall("DbLog_reduceLogNbl","$name","DbLog_reduceLogNbl_finished"); return; @@ -613,20 +639,20 @@ sub DbLog_Set($@) { $ret = "reduceLogNbl error, no given."; } } - elsif ($a[1] eq 'clearReadings') { + elsif ($a[1] eq 'clearReadings') { my @allrds = keys%{$defs{$name}{READINGS}}; - foreach my $key(@allrds) { - next if($key =~ m/state/ || $key =~ m/CacheUsage/ || $key =~ m/NextSync/); - readingsSingleUpdate($hash,$key," ",0); + foreach my $key(@allrds) { + next if($key =~ m/state/ || $key =~ m/CacheUsage/ || $key =~ m/NextSync/); + readingsSingleUpdate($hash,$key," ",0); } } - elsif ($a[1] eq 'eraseReadings') { + elsif ($a[1] eq 'eraseReadings') { my @allrds = keys%{$defs{$name}{READINGS}}; - foreach my $key(@allrds) { + foreach my $key(@allrds) { delete($defs{$name}{READINGS}{$key}) if($key !~ m/^state$/); } - } - elsif ($a[1] eq 'addLog') { + } + elsif ($a[1] eq 'addLog') { unless ($a[2]) { return "The argument of $a[1] is not valid. 
Please check commandref.";} my $nce = ("\!useExcludes" ~~ @a)?1:0; map(s/\!useExcludes//g, @a); @@ -636,50 +662,50 @@ sub DbLog_Set($@) { ($cn) = ($t =~ /^.*CN=(\w+).*$/); map(s/CN=$cn//g, @a); } - DbLog_AddLog($hash,$a[2],$a[3],$nce,$cn); + DbLog_AddLog($hash,$a[2],$a[3],$nce,$cn); my $skip_trigger = 1; # kein Event erzeugen falls addLog device/reading not found aber Abarbeitung erfolgreich return undef,$skip_trigger; - } + } elsif ($a[1] eq 'reopen') { return if(IsDisabled($name)); - if ($dbh) { + if ($dbh) { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; if ($@) { Log3($name, 2, "DbLog $name -> Error commit $history - $@"); } $dbh->disconnect(); } - if (!$a[2]) { - Log3($name, 3, "DbLog $name: Reopen requested."); + if (!$a[2]) { + Log3($name, 3, "DbLog $name: Reopen requested."); DbLog_ConnectPush($hash); if($hash->{HELPER}{REOPEN_RUNS}) { - delete $hash->{HELPER}{REOPEN_RUNS}; + delete $hash->{HELPER}{REOPEN_RUNS}; delete $hash->{HELPER}{REOPEN_RUNS_UNTIL}; RemoveInternalTimer($hash, "DbLog_reopen"); } - DbLog_execmemcache($hash) if($async); + DbLog_execmemcache($hash) if($async); $ret = "Reopen executed."; - } else { - unless ($a[2] =~ /^[0-9]+$/) { return " The Value of $a[1]-time is not valid. Use only figures 0-9 !";} - # Statusbit "Kein Schreiben in DB erlauben" wenn reopen mit Zeitangabe + } else { + unless ($a[2] =~ /^[0-9]+$/) { return " The Value of $a[1]-time is not valid. 
Use only figures 0-9 !";} + # Statusbit "Kein Schreiben in DB erlauben" wenn reopen mit Zeitangabe $hash->{HELPER}{REOPEN_RUNS} = $a[2]; - - # falls ein hängender Prozess vorhanden ist -> löschen - BlockingKill($hash->{HELPER}{".RUNNING_PID"}) if($hash->{HELPER}{".RUNNING_PID"}); + + # falls ein hängender Prozess vorhanden ist -> löschen + BlockingKill($hash->{HELPER}{".RUNNING_PID"}) if($hash->{HELPER}{".RUNNING_PID"}); BlockingKill($hash->{HELPER}{REDUCELOG_PID}) if($hash->{HELPER}{REDUCELOG_PID}); BlockingKill($hash->{HELPER}{COUNT_PID}) if($hash->{HELPER}{COUNT_PID}); BlockingKill($hash->{HELPER}{DELDAYS_PID}) if($hash->{HELPER}{DELDAYS_PID}); - delete $hash->{HELPER}{".RUNNING_PID"}; - delete $hash->{HELPER}{COUNT_PID}; - delete $hash->{HELPER}{DELDAYS_PID}; - delete $hash->{HELPER}{REDUCELOG_PID}; - - my $ts = (split(" ",FmtDateTime(gettimeofday()+$a[2])))[1]; - Log3($name, 2, "DbLog $name: Connection closed until $ts ($a[2] seconds)."); - readingsSingleUpdate($hash, "state", "closed until $ts ($a[2] seconds)", 1); + delete $hash->{HELPER}{".RUNNING_PID"}; + delete $hash->{HELPER}{COUNT_PID}; + delete $hash->{HELPER}{DELDAYS_PID}; + delete $hash->{HELPER}{REDUCELOG_PID}; + + my $ts = (split(" ",FmtDateTime(gettimeofday()+$a[2])))[1]; + Log3($name, 2, "DbLog $name: Connection closed until $ts ($a[2] seconds)."); + readingsSingleUpdate($hash, "state", "closed until $ts ($a[2] seconds)", 1); InternalTimer(gettimeofday()+$a[2], "DbLog_reopen", $hash, 0); - $hash->{HELPER}{REOPEN_RUNS_UNTIL} = $ts; - } + $hash->{HELPER}{REOPEN_RUNS_UNTIL} = $ts; + } } elsif ($a[1] eq 'rereadcfg') { Log3($name, 3, "DbLog $name: Rereadcfg requested."); @@ -693,36 +719,36 @@ sub DbLog_Set($@) { DbLog_ConnectPush($hash); $ret = "Rereadcfg executed."; } - elsif ($a[1] eq 'purgeCache') { - delete $data{DbLog}{$name}{cache}; - readingsSingleUpdate($hash, 'CacheUsage', 0, 1); - } - elsif ($a[1] eq 'commitCache') { - DbLog_execmemcache($hash); - } - elsif ($a[1] eq 'listCache') { - my 
$cache; - foreach my $key (sort{$a <=>$b}keys %{$data{DbLog}{$name}{cache}{memcache}}) { - $cache .= $key." => ".$data{DbLog}{$name}{cache}{memcache}{$key}."\n"; - } - return $cache; - } - elsif ($a[1] eq 'addCacheLine') { - if(!$a[2]) { + elsif ($a[1] eq 'purgeCache') { + delete $data{DbLog}{$name}{cache}; + readingsSingleUpdate($hash, 'CacheUsage', 0, 1); + } + elsif ($a[1] eq 'commitCache') { + DbLog_execmemcache($hash); + } + elsif ($a[1] eq 'listCache') { + my $cache; + foreach my $key (sort{$a <=>$b}keys %{$data{DbLog}{$name}{cache}{memcache}}) { + $cache .= $key." => ".$data{DbLog}{$name}{cache}{memcache}{$key}."\n"; + } + return $cache; + } + elsif ($a[1] eq 'addCacheLine') { + if(!$a[2]) { return "Syntax error in set $a[1] command. Use this line format: YYYY-MM-DD HH:MM:SS||||||[] "; } - my @b = @a; - shift @b; - shift @b; - my $aa; - foreach my $k (@b) { - $aa .= "$k "; - } - chop($aa); #letztes Leerzeichen entfernen - $aa = DbLog_charfilter($aa) if(AttrVal($name, "useCharfilter",0)); - - my ($i_timestamp, $i_dev, $i_type, $i_evt, $i_reading, $i_val, $i_unit) = split("\\|",$aa); - if($i_timestamp !~ /^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})$/ || !$i_dev || !$i_reading) { + my @b = @a; + shift @b; + shift @b; + my $aa; + foreach my $k (@b) { + $aa .= "$k "; + } + chop($aa); #letztes Leerzeichen entfernen + $aa = DbLog_charfilter($aa) if(AttrVal($name, "useCharfilter",0)); + + my ($i_timestamp, $i_dev, $i_type, $i_evt, $i_reading, $i_val, $i_unit) = split("\\|",$aa); + if($i_timestamp !~ /^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})$/ || !$i_dev || !$i_reading) { return "Syntax error in set $a[1] command. 
Use this line format: YYYY-MM-DD HH:MM:SS||||||[] "; } my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($i_timestamp =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); @@ -732,22 +758,22 @@ sub DbLog_Set($@) { my @l = split (/at/, $@); return " Timestamp is out of range - $l[0]"; } - DbLog_addCacheLine($hash,$i_timestamp,$i_dev,$i_type,$i_evt,$i_reading,$i_val,$i_unit); - - } - elsif ($a[1] eq 'configCheck') { - my $check = DbLog_configcheck($hash); - return $check; - } - elsif ($a[1] eq 'exportCache') { - my $cln; - my $crows = 0; - my ($out,$outfile,$error); - my $now = strftime('%Y-%m-%d_%H-%M-%S',localtime); - - # return wenn "reopen" mit Ablaufzeit gestartet ist oder disabled, nicht im asynch-Mode - return if(IsDisabled($name) || $hash->{HELPER}{REOPEN_RUNS}); - return if(!AttrVal($name, "asyncMode", undef)); + DbLog_addCacheLine($hash,$i_timestamp,$i_dev,$i_type,$i_evt,$i_reading,$i_val,$i_unit); + + } + elsif ($a[1] eq 'configCheck') { + my $check = DbLog_configcheck($hash); + return $check; + } + elsif ($a[1] eq 'exportCache') { + my $cln; + my $crows = 0; + my ($out,$outfile,$error); + my $now = strftime('%Y-%m-%d_%H-%M-%S',localtime); + + # return wenn "reopen" mit Ablaufzeit gestartet ist oder disabled, nicht im asynch-Mode + return if(IsDisabled($name) || $hash->{HELPER}{REOPEN_RUNS}); + return if(!AttrVal($name, "asyncMode", undef)); if(@logs && AttrVal($name, "exportCacheAppend", 0)) { # exportiertes Cachefile existiert und es soll an das neueste angehängt werden @@ -760,110 +786,110 @@ sub DbLog_Set($@) { if(open(FH, $out)) { binmode (FH); } else { - readingsSingleUpdate($hash, "lastCachefile", $outfile." - Error - ".$!, 1); + readingsSingleUpdate($hash, "lastCachefile", $outfile." 
- Error - ".$!, 1); $error = "could not open ".$outfile.": ".$!; } if(!$error) { - foreach my $key (sort(keys %{$data{DbLog}{$name}{cache}{memcache}})) { + foreach my $key (sort(keys %{$data{DbLog}{$name}{cache}{memcache}})) { $cln = $data{DbLog}{$name}{cache}{memcache}{$key}."\n"; print FH $cln ; - $crows++; - } - close(FH); + $crows++; + } + close(FH); readingsSingleUpdate($hash, "lastCachefile", $outfile." (".$crows." cache rows exported)", 1); } - - # readingsSingleUpdate($hash, "state", $crows." cache rows exported to ".$outfile, 1); + + # readingsSingleUpdate($hash, "state", $crows." cache rows exported to ".$outfile, 1); my $state = $error?$error:(IsDisabled($name))?"disabled":"connected"; my $evt = ($state eq $hash->{HELPER}{OLDSTATE})?0:1; readingsSingleUpdate($hash, "state", $state, $evt); $hash->{HELPER}{OLDSTATE} = $state; - + Log3($name, 3, "DbLog $name: $crows cache rows exported to $outfile."); - - if (lc($a[-1]) =~ m/^purgecache/i) { - delete $data{DbLog}{$name}{cache}; + + if (lc($a[-1]) =~ m/^purgecache/i) { + delete $data{DbLog}{$name}{cache}; readingsSingleUpdate($hash, 'CacheUsage', 0, 1); - Log3($name, 3, "DbLog $name: Cache purged after exporting rows to $outfile."); - } - return; - } - elsif ($a[1] eq 'importCachefile') { - my $cln; - my $crows = 0; - my $infile; - my @row_array; - readingsSingleUpdate($hash, "lastCachefile", "", 0); - - # return wenn "reopen" mit Ablaufzeit gestartet ist oder disabled - return if(IsDisabled($name) || $hash->{HELPER}{REOPEN_RUNS}); - - if (!$a[2]) { - return "Wrong function-call. Use set importCachefile without directory (see attr expimpdir)." 
; - } else { - $infile = $dir.$a[2]; - } - + Log3($name, 3, "DbLog $name: Cache purged after exporting rows to $outfile."); + } + return; + } + elsif ($a[1] eq 'importCachefile') { + my $cln; + my $crows = 0; + my $infile; + my @row_array; + readingsSingleUpdate($hash, "lastCachefile", "", 0); + + # return wenn "reopen" mit Ablaufzeit gestartet ist oder disabled + return if(IsDisabled($name) || $hash->{HELPER}{REOPEN_RUNS}); + + if (!$a[2]) { + return "Wrong function-call. Use set importCachefile without directory (see attr expimpdir)." ; + } else { + $infile = $dir.$a[2]; + } + if (open(FH, "$infile")) { binmode (FH); } else { return "could not open ".$infile.": ".$!; } while () { - my $row = $_; - $row = DbLog_charfilter($row) if(AttrVal($name, "useCharfilter",0)); - push(@row_array, $row); - $crows++; - } - close(FH); - + my $row = $_; + $row = DbLog_charfilter($row) if(AttrVal($name, "useCharfilter",0)); + push(@row_array, $row); + $crows++; + } + close(FH); + if(@row_array) { my $error = DbLog_Push($hash, 1, @row_array); - if($error) { - readingsSingleUpdate($hash, "lastCachefile", $infile." - Error - ".$!, 1); + if($error) { + readingsSingleUpdate($hash, "lastCachefile", $infile." - Error - ".$!, 1); readingsSingleUpdate($hash, "state", $error, 1); - Log3 $name, 5, "DbLog $name -> DbLog_Push Returncode: $error"; - } else { - unless(rename($dir.$a[2], $dir."impdone_".$a[2])) { - Log3($name, 2, "DbLog $name: cachefile $infile couldn't be renamed after import !"); - } - readingsSingleUpdate($hash, "lastCachefile", $infile." import successful", 1); - readingsSingleUpdate($hash, "state", $crows." 
cache rows processed from ".$infile, 1); - Log3($name, 3, "DbLog $name: $crows cache rows processed from $infile."); - } + Log3 $name, 5, "DbLog $name -> DbLog_Push Returncode: $error"; + } else { + unless(rename($dir.$a[2], $dir."impdone_".$a[2])) { + Log3($name, 2, "DbLog $name: cachefile $infile couldn't be renamed after import !"); + } + readingsSingleUpdate($hash, "lastCachefile", $infile." import successful", 1); + readingsSingleUpdate($hash, "state", $crows." cache rows processed from ".$infile, 1); + Log3($name, 3, "DbLog $name: $crows cache rows processed from $infile."); + } } else { - readingsSingleUpdate($hash, "state", "no rows in ".$infile, 1); - Log3($name, 3, "DbLog $name: $infile doesn't contain any rows - no imports done."); - } - - return; - } + readingsSingleUpdate($hash, "state", "no rows in ".$infile, 1); + Log3($name, 3, "DbLog $name: $infile doesn't contain any rows - no imports done."); + } + + return; + } elsif ($a[1] eq 'count') { $dbh = DbLog_ConnectNewDBH($hash); if(!$dbh) { Log3($name, 1, "DbLog $name: DBLog_Set - count - DB connect not possible"); - return; + return; } else { Log3($name, 4, "DbLog $name: Records count requested."); - my $c = $dbh->selectrow_array("SELECT count(*) FROM $history"); + my $c = $dbh->selectrow_array("SELECT count(*) FROM $history"); readingsSingleUpdate($hash, 'countHistory', $c ,1); $c = $dbh->selectrow_array("SELECT count(*) FROM $current"); readingsSingleUpdate($hash, 'countCurrent', $c ,1); - $dbh->disconnect(); - - InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); - } + $dbh->disconnect(); + + InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); + } } - elsif ($a[1] eq 'countNbl') { + elsif ($a[1] eq 'countNbl') { if ($hash->{HELPER}{COUNT_PID} && $hash->{HELPER}{COUNT_PID}{pid} !~ m/DEAD/){ $ret = "DbLog count already in progress. 
Please wait until the running process is finished."; } else { delete $hash->{HELPER}{COUNT_PID}; $hash->{HELPER}{COUNT_PID} = BlockingCall("DbLog_countNbl","$name","DbLog_countNbl_finished"); return; - } + } } elsif ($a[1] eq 'deleteOldDays') { Log3 ($name, 3, "DbLog $name -> Deletion of records older than $a[2] days in database $db requested"); @@ -872,7 +898,7 @@ sub DbLog_Set($@) { $dbh = DbLog_ConnectNewDBH($hash); if(!$dbh) { Log3($name, 1, "DbLog $name: DBLog_Set - deleteOldDays - DB connect not possible"); - return; + return; } else { $cmd = "delete from $history where TIMESTAMP < "; @@ -883,24 +909,24 @@ sub DbLog_Set($@) { if(defined($cmd)) { $c = $dbh->do($cmd); - $c = 0 if($c == 0E0); - eval {$dbh->commit() if(!$dbh->{AutoCommit});}; - $dbh->disconnect(); - Log3 ($name, 3, "DbLog $name -> deleteOldDays finished. $c entries of database $db deleted."); + $c = 0 if($c == 0E0); + eval {$dbh->commit() if(!$dbh->{AutoCommit});}; + $dbh->disconnect(); + Log3 ($name, 3, "DbLog $name -> deleteOldDays finished. $c entries of database $db deleted."); readingsSingleUpdate($hash, 'lastRowsDeleted', $c ,1); } - - InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); + + InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); } } - elsif ($a[1] eq 'deleteOldDaysNbl') { + elsif ($a[1] eq 'deleteOldDaysNbl') { if (defined $a[2] && $a[2] =~ /^\d+$/) { if ($hash->{HELPER}{DELDAYS_PID} && $hash->{HELPER}{DELDAYS_PID}{pid} !~ m/DEAD/) { $ret = "deleteOldDaysNbl already in progress. 
Please wait until the running process is finished."; } else { - delete $hash->{HELPER}{DELDAYS_PID}; + delete $hash->{HELPER}{DELDAYS_PID}; $hash->{HELPER}{DELDAYS} = $a[2]; - Log3 ($name, 3, "DbLog $name -> Deletion of records older than $a[2] days in database $db requested"); + Log3 ($name, 3, "DbLog $name -> Deletion of records older than $a[2] days in database $db requested"); $hash->{HELPER}{DELDAYS_PID} = BlockingCall("DbLog_deldaysNbl","$name","DbLog_deldaysNbl_done"); return; } @@ -913,7 +939,7 @@ sub DbLog_Set($@) { $dbh = DbLog_ConnectNewDBH($hash); if(!$dbh) { Log3($name, 1, "DbLog $name: DBLog_Set - userCommand - DB connect not possible"); - return; + return; } else { Log3($name, 4, "DbLog $name: userCommand execution requested."); my ($c, @cmd, $sql); @@ -925,17 +951,17 @@ sub DbLog_Set($@) { $dbh->{PrintError} = 0; my $error; eval { $c = $dbh->selectrow_array($sql); }; - if($@) { + if($@) { $error = $@; Log3($name, 1, "DbLog $name: DBLog_Set - $error"); } my $res = $error?$error:(defined($c))?$c:"no result"; - Log3($name, 4, "DbLog $name: DBLog_Set - userCommand - result: $res"); + Log3($name, 4, "DbLog $name: DBLog_Set - userCommand - result: $res"); readingsSingleUpdate($hash, 'userCommandResult', $res ,1); - $dbh->disconnect(); - - InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); + $dbh->disconnect(); + + InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); } } else { $ret = $usage; } @@ -964,8 +990,8 @@ sub DbLog_regexpFn($$) { $ret .= '|' if( $ret ); no warnings 'uninitialized'; # Forum:74690, bug unitialized - $ret .= $fld[0] .'.'. $fld[1]; - use warnings; + $ret .= $fld[0] .'.'. 
$fld[1]; + use warnings; } return $ret; @@ -1281,11 +1307,11 @@ sub DbLog_Log($$) { $vb4show = 1; } else { foreach (@vb4devs) { - if($dev_name =~ m/$_/i) { - $vb4show = 1; - last; - } - } + if($dev_name =~ m/$_/i) { + $vb4show = 1; + last; + } + } } if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}) { @@ -1326,16 +1352,16 @@ sub DbLog_Log($$) { my $next = 0; my $event = $events->[$i]; $event = "" if(!defined($event)); - $event = DbLog_charfilter($event) if(AttrVal($name, "useCharfilter",0)); + $event = DbLog_charfilter($event) if(AttrVal($name, "useCharfilter",0)); Log3 $name, 4, "DbLog $name -> check Device: $dev_name , Event: $event" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); - - if($dev_name =~ m/^$re$/ || "$dev_name:$event" =~ m/^$re$/ || $DbLogSelectionMode eq 'Include') { - my $timestamp = $ts_0; + + if($dev_name =~ m/^$re$/ || "$dev_name:$event" =~ m/^$re$/ || $DbLogSelectionMode eq 'Include') { + my $timestamp = $ts_0; $timestamp = $dev_hash->{CHANGETIME}[$i] if(defined($dev_hash->{CHANGETIME}[$i])); $event =~ s/\|/_ESC_/g; # escape Pipe "|" my @r = DbLog_ParseEvent($name,$dev_name, $dev_type, $event); - $reading = $r[0]; + $reading = $r[0]; $value = $r[1]; $unit = $r[2]; if(!defined $reading) {$reading = "";} @@ -1353,30 +1379,30 @@ sub DbLog_Log($$) { @excldr = split(",",$exc); foreach my $excl (@excldr) { ($ds,$rd) = split("#",$excl); - @exdvs = devspec2array($ds); - if(@exdvs) { + @exdvs = devspec2array($ds); + if(@exdvs) { # Log3 $name, 3, "DbLog $name -> excludeDevs: @exdvs"; - foreach (@exdvs) { + foreach (@exdvs) { if($rd) { - if("$dev_name:$reading" =~ m/^$_:$rd$/) { - Log3 $name, 4, "DbLog $name -> Device:Reading \"$dev_name:$reading\" global excluded from logging by attribute \"excludeDevs\" " if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); + if("$dev_name:$reading" =~ m/^$_:$rd$/) { + Log3 $name, 4, "DbLog $name -> Device:Reading \"$dev_name:$reading\" global excluded from logging by attribute \"excludeDevs\" " if($vb4show && 
!$hash->{HELPER}{".RUNNING_PID"}); $next = 1; - } + } } else { - if($dev_name =~ m/^$_$/) { - Log3 $name, 4, "DbLog $name -> Device \"$dev_name\" global excluded from logging by attribute \"excludeDevs\" " if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); + if($dev_name =~ m/^$_$/) { + Log3 $name, 4, "DbLog $name -> Device \"$dev_name\" global excluded from logging by attribute \"excludeDevs\" " if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); $next = 1; - } + } } - } - } + } + } } next if($next); } - - Log3 $name, 5, "DbLog $name -> parsed Event: $dev_name , Event: $event" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); - Log3 $name, 5, "DbLog $name -> DbLogExclude of \"$dev_name\": $DbLogExclude" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"} && $DbLogExclude); - Log3 $name, 5, "DbLog $name -> DbLogInclude of \"$dev_name\": $DbLogInclude" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"} && $DbLogInclude); + + Log3 $name, 5, "DbLog $name -> parsed Event: $dev_name , Event: $event" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); + Log3 $name, 5, "DbLog $name -> DbLogExclude of \"$dev_name\": $DbLogExclude" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"} && $DbLogExclude); + Log3 $name, 5, "DbLog $name -> DbLogInclude of \"$dev_name\": $DbLogInclude" if($vb4show && !$hash->{HELPER}{".RUNNING_PID"} && $DbLogInclude); # Je nach DBLogSelectionMode muss das vorgegebene Ergebnis der Include-, bzw. Exclude-Pruefung # entsprechend unterschiedlich vorbelegt sein. 
@@ -1385,15 +1411,15 @@ sub DbLog_Log($$) { $DoIt = 1 if($DbLogSelectionMode =~ m/Exclude/ ); - if($DbLogExclude && $DbLogSelectionMode =~ m/Exclude/) { + if($DbLogExclude && $DbLogSelectionMode =~ m/Exclude/) { # Bsp: "(temperature|humidity):300,battery:3600:force" my @v1 = split(/,/, $DbLogExclude); - for (my $i=0; $i{NAME}}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{TIME}; my $lv = $defs{$dev_hash->{NAME}}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{VALUE}; @@ -1409,17 +1435,17 @@ sub DbLog_Log($$) { } } - # Hier ggf. zusätzlich noch dbLogInclude pruefen, falls bereits durch DbLogExclude ausgeschlossen + # Hier ggf. zusätzlich noch dbLogInclude pruefen, falls bereits durch DbLogExclude ausgeschlossen # Im Endeffekt genau die gleiche Pruefung, wie fuer DBLogExclude, lediglich mit umgegkehrtem Ergebnis. if($DoIt == 0) { if($DbLogInclude && ($DbLogSelectionMode =~ m/Include/)) { my @v1 = split(/,/, $DbLogInclude); - for (my $i=0; $i{NAME}}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{TIME}; my $lv = $defs{$dev_hash->{NAME}}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{VALUE}; @@ -1439,32 +1465,38 @@ sub DbLog_Log($$) { # check auf defaultMinInterval $DoIt = DbLog_checkDefMinInt($name,$dev_name,$now,$reading,$value); - - if ($DoIt) { + + if ($DoIt) { + my $lastt = $defs{$dev_name}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{TIME}; # patch Forum:#111423 + my $lastv = $defs{$dev_name}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{VALUE}; $defs{$dev_name}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{TIME} = $now; $defs{$dev_name}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{VALUE} = $value; - # Device spezifische DbLogValueFn-Funktion anwenden - if($DbLogValueFn ne '') { - my $TIMESTAMP = $timestamp; - my $DEVICE = $dev_name; - my $EVENT = $event; - my $READING = $reading; - my $VALUE = $value; - my $UNIT = $unit; - my $IGNORE = 0; - my $CN = " "; + # Device spezifische DbLogValueFn-Funktion anwenden + if($DbLogValueFn ne '') { + my $TIMESTAMP = $timestamp; + my $LASTTIMESTAMP = $lastt // 0; # patch 
Forum:#111423 + my $DEVICE = $dev_name; + my $EVENT = $event; + my $READING = $reading; + my $VALUE = $value; + my $LASTVALUE = $lastv // ""; # patch Forum:#111423 + my $UNIT = $unit; + my $IGNORE = 0; + my $CN = " "; - eval $DbLogValueFn; - Log3 $name, 2, "DbLog $name -> error device \"$dev_name\" specific DbLogValueFn: ".$@ if($@); + eval $DbLogValueFn; + Log3 $name, 2, "DbLog $name -> error device \"$dev_name\" specific DbLogValueFn: ".$@ if($@); + + if($IGNORE) { + # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt + $defs{$dev_name}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{TIME} = $lastt if($lastt); # patch Forum:#111423 + $defs{$dev_name}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{VALUE} = $lastv if(defined $lastv); + Log3 $hash->{NAME}, 4, "DbLog $name -> Event ignored by device \"$dev_name\" specific DbLogValueFn - TS: $timestamp, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $reading, Value: $value, Unit: $unit" + if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); + next; + } - if($IGNORE) { - # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt in $DbLogValueFn - Log3 $hash->{NAME}, 4, "DbLog $name -> Event ignored by device \"$dev_name\" specific DbLogValueFn - TS: $timestamp, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $reading, Value: $value, Unit: $unit" - if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); - next; - } - my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($TIMESTAMP =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); eval { my $epoch_seconds_begin = timelocal($sec, $min, $hh, $dd, $mm-1, $yyyy-1900); }; if (!$@) { @@ -1472,32 +1504,36 @@ sub DbLog_Log($$) { } else { Log3 ($name, 2, "DbLog $name -> TIMESTAMP got from DbLogValueFn in $dev_name is invalid: $TIMESTAMP"); } - $reading = $READING if($READING ne ''); - $value = $VALUE if(defined $VALUE); - $unit = $UNIT if(defined $UNIT); + $reading = $READING if($READING ne ''); + $value = $VALUE if(defined $VALUE); + $unit = $UNIT if(defined $UNIT); } - - # zentrale 
valueFn im DbLog-Device abarbeiten - if($value_fn ne '') { - my $TIMESTAMP = $timestamp; - my $DEVICE = $dev_name; - my $DEVICETYPE = $dev_type; - my $EVENT = $event; - my $READING = $reading; - my $VALUE = $value; - my $UNIT = $unit; - my $IGNORE = 0; - my $CN = " "; + + # zentrale valueFn im DbLog-Device abarbeiten + if($value_fn ne '') { + my $TIMESTAMP = $timestamp; + my $LASTTIMESTAMP = $lastt // 0; # patch Forum:#111423 + my $DEVICE = $dev_name; + my $DEVICETYPE = $dev_type; + my $EVENT = $event; + my $READING = $reading; + my $VALUE = $value; + my $LASTVALUE = $lastv // ""; # patch Forum:#111423 + my $UNIT = $unit; + my $IGNORE = 0; + my $CN = " "; - eval $value_fn; - Log3 $name, 2, "DbLog $name -> error valueFn: ".$@ if($@); + eval $value_fn; + Log3 $name, 2, "DbLog $name -> error valueFn: ".$@ if($@); - if($IGNORE) { - # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt in $value_fn - Log3 $hash->{NAME}, 4, "DbLog $name -> Event ignored by valueFn - TS: $timestamp, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $reading, Value: $value, Unit: $unit" - if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); - next; - } + if($IGNORE) { + # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt + $defs{$dev_name}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{TIME} = $lastt if($lastt); # patch Forum:#111423 + $defs{$dev_name}{Helper}{DBLOG}{$reading}{$hash->{NAME}}{VALUE} = $lastv if(defined $lastv); + Log3 $hash->{NAME}, 4, "DbLog $name -> Event ignored by valueFn - TS: $timestamp, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $reading, Value: $value, Unit: $unit" + if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); + next; + } my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($TIMESTAMP =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); eval { my $epoch_seconds_begin = timelocal($sec, $min, $hh, $dd, $mm-1, $yyyy-1900); }; if (!$@) { @@ -1505,56 +1541,56 @@ sub DbLog_Log($$) { } else { Log3 ($name, 2, "DbLog $name -> Parameter TIMESTAMP got from 
valueFn is invalid: $TIMESTAMP"); } - $dev_name = $DEVICE if($DEVICE ne ''); - $dev_type = $DEVICETYPE if($DEVICETYPE ne ''); - $reading = $READING if($READING ne ''); - $value = $VALUE if(defined $VALUE); - $unit = $UNIT if(defined $UNIT); + $dev_name = $DEVICE if($DEVICE ne ''); + $dev_type = $DEVICETYPE if($DEVICETYPE ne ''); + $reading = $READING if($READING ne ''); + $value = $VALUE if(defined $VALUE); + $unit = $UNIT if(defined $UNIT); } - # Daten auf maximale Länge beschneiden + # Daten auf maximale Länge beschneiden ($dev_name,$dev_type,$event,$reading,$value,$unit) = DbLog_cutCol($hash,$dev_name,$dev_type,$event,$reading,$value,$unit); - my $row = ($timestamp."|".$dev_name."|".$dev_type."|".$event."|".$reading."|".$value."|".$unit); - Log3 $hash->{NAME}, 4, "DbLog $name -> added event - Timestamp: $timestamp, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $reading, Value: $value, Unit: $unit" - if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); + my $row = ($timestamp."|".$dev_name."|".$dev_type."|".$event."|".$reading."|".$value."|".$unit); + Log3 $hash->{NAME}, 4, "DbLog $name -> added event - Timestamp: $timestamp, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $reading, Value: $value, Unit: $unit" + if($vb4show && !$hash->{HELPER}{".RUNNING_PID"}); - if($async) { - # asynchoner non-blocking Mode - # Cache & CacheIndex für Events zum asynchronen Schreiben in DB - $data{DbLog}{$name}{cache}{index}++; - my $index = $data{DbLog}{$name}{cache}{index}; - $data{DbLog}{$name}{cache}{memcache}{$index} = $row; - - my $memcount = $data{DbLog}{$name}{cache}{memcache}?scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}):0; - if($ce == 1) { + if($async) { + # asynchoner non-blocking Mode + # Cache & CacheIndex für Events zum asynchronen Schreiben in DB + $data{DbLog}{$name}{cache}{index}++; + my $index = $data{DbLog}{$name}{cache}{index}; + $data{DbLog}{$name}{cache}{memcache}{$index} = $row; + + my $memcount = 
$data{DbLog}{$name}{cache}{memcache}?scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}):0; + if($ce == 1) { readingsSingleUpdate($hash, "CacheUsage", $memcount, 1); - } else { - readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); - } - # asynchrone Schreibroutine aufrufen wenn Füllstand des Cache erreicht ist - if($memcount >= $clim) { + } else { + readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); + } + # asynchrone Schreibroutine aufrufen wenn Füllstand des Cache erreicht ist + if($memcount >= $clim) { my $lmlr = $hash->{HELPER}{LASTLIMITRUNTIME}; my $syncival = AttrVal($name, "syncInterval", 30); if(!$lmlr || gettimeofday() > $lmlr+($syncival/2)) { - Log3 $hash->{NAME}, 4, "DbLog $name -> Number of cache entries reached cachelimit $clim - start database sync."; - DbLog_execmemcache($hash); + Log3 $hash->{NAME}, 4, "DbLog $name -> Number of cache entries reached cachelimit $clim - start database sync."; + DbLog_execmemcache($hash); $hash->{HELPER}{LASTLIMITRUNTIME} = gettimeofday(); } - } - # Notify-Routine Laufzeit ermitteln + } + # Notify-Routine Laufzeit ermitteln $net = tv_interval($nst); - } else { - # synchoner Mode - push(@row_array, $row); - } - } + } else { + # synchoner Mode + push(@row_array, $row); + } + } } } }; if(!$async) { if(@row_array) { - # synchoner Mode + # synchoner Mode return if($hash->{HELPER}{REOPEN_RUNS}); # return wenn "reopen" mit Ablaufzeit gestartet ist my $error = DbLog_Push($hash, $vb4show, @row_array); Log3 ($name, 5, "DbLog $name -> DbLog_Push Returncode: $error") if($error && $vb4show); @@ -1564,8 +1600,8 @@ sub DbLog_Log($$) { my $evt = ($state eq $hash->{HELPER}{OLDSTATE})?0:1; readingsSingleUpdate($hash, "state", $state, $evt); $hash->{HELPER}{OLDSTATE} = $state; - - # Notify-Routine Laufzeit ermitteln + + # Notify-Routine Laufzeit ermitteln $net = tv_interval($nst); } else { CancelDelayedShutdown($name) if($hash->{HELPER}{SHUTDOWNSEQ}); @@ -1618,7 +1654,7 @@ sub DbLog_checkDefMinInt ($$$$$){ # Bsp. 
für ein defaulMInInterval Element: # device::interval[::force] my @dvs = devspec2array($adefelem[0]); - if(@dvs) { + if(@dvs) { foreach (@dvs) { if($dev_name =~ m,^$_$,) { # aktuelles Device matcht auf Regexp # device,reading wird gegen "defaultMinInterval" geprüft @@ -1667,7 +1703,7 @@ sub DbLog_Push(@) { # andererseite kein "MySQL-Server has gone away" Fehler if ($nh) { $dbh = DbLog_ConnectNewDBH($hash); - return if(!$dbh); + return if(!$dbh); } else { $dbh = $hash->{DBHP}; eval { @@ -1678,7 +1714,7 @@ sub DbLog_Push(@) { }; if ($@) { Log3($name, 1, "DbLog $name: DBLog_Push - DB Session dead! - $@"); - return $@; + return $@; } else { $dbh = $hash->{DBHP}; } @@ -1723,16 +1759,16 @@ sub DbLog_Push(@) { foreach my $row (@row_array) { my @a = split("\\|",$row); s/_ESC_/\|/g for @a; # escaped Pipe return to "|" - push(@timestamp, "$a[0]"); - push(@device, "$a[1]"); - push(@type, "$a[2]"); - push(@event, "$a[3]"); - push(@reading, "$a[4]"); - push(@value, "$a[5]"); - push(@unit, "$a[6]"); - Log3 $hash->{NAME}, 4, "DbLog $name -> processing event Timestamp: $a[0], Device: $a[1], Type: $a[2], Event: $a[3], Reading: $a[4], Value: $a[5], Unit: $a[6]" - if($vb4show); - } + push(@timestamp, "$a[0]"); + push(@device, "$a[1]"); + push(@type, "$a[2]"); + push(@event, "$a[3]"); + push(@reading, "$a[4]"); + push(@value, "$a[5]"); + push(@unit, "$a[6]"); + Log3 $hash->{NAME}, 4, "DbLog $name -> processing event Timestamp: $a[0], Device: $a[1], Type: $a[2], Event: $a[3], Reading: $a[4], Value: $a[5], Unit: $a[6]" + if($vb4show); + } use warnings; if($bi) { @@ -1767,7 +1803,7 @@ sub DbLog_Push(@) { $a[5] =~ s/\\/\\\\/g; # escape \ with \\ $a[6] =~ s/\\/\\\\/g; # escape \ with \\ $sqlins .= "('$a[0]','$a[1]','$a[2]','$a[3]','$a[4]','$a[5]','$a[6]'),"; - } + } use warnings; chop($sqlins); @@ -1792,9 +1828,9 @@ sub DbLog_Push(@) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ins_hist of $ceti events inserted into table $history".($usepkh?" 
using PK on columns $pkh":""); } else { if($usepkh) { - Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".$ins_hist." of $ceti events inserted into table $history due to PK on columns $pkh"; + Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".$ins_hist." of $ceti events inserted into table $history due to PK on columns $pkh"; } else { - Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".$ins_hist." of $ceti events inserted into table $history"; + Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".$ins_hist." of $ceti events inserted into table $history"; } } eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # Data commit @@ -1827,7 +1863,7 @@ sub DbLog_Push(@) { # Array-Insert wird auch bei Bulk verwendet weil im Bulk-Mode # die nicht upgedateten Sätze nicht identifiziert werden können if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; + eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ic = $dbh->prepare("INSERT OR IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { @@ -1848,7 +1884,7 @@ sub DbLog_Push(@) { $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, VALUE=EXCLUDED.VALUE, UNIT=EXCLUDED.UNIT"); - } else { + } else { $sth_uc = $dbh->prepare("UPDATE $current SET TIMESTAMP=?, TYPE=?, EVENT=?, VALUE=?, UNIT=? WHERE (DEVICE=?) 
AND (READING=?)"); } @@ -1988,10 +2024,10 @@ sub DbLog_Push(@) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ceti of $ceti events inserted into table $history".($usepkh?" using PK on columns $pkh":""); } else { if($usepkh) { - Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".($ceti-$nins_hist)." of $ceti events inserted into table $history due to PK on columns $pkh"; + Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".($ceti-$nins_hist)." of $ceti events inserted into table $history due to PK on columns $pkh"; } else { - Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".($ceti-$nins_hist)." of $ceti events inserted into table $history"; - } + Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".($ceti-$nins_hist)." of $ceti events inserted into table $history"; + } } eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # Data commit if ($@) { @@ -2021,7 +2057,7 @@ sub DbLog_Push(@) { ######################################## # insert current mit/ohne primary key if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; + eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ic = $dbh->prepare("INSERT OR IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { @@ -2042,7 +2078,7 @@ sub DbLog_Push(@) { $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) 
ON CONFLICT ($pkc) DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, VALUE=EXCLUDED.VALUE, UNIT=EXCLUDED.UNIT"); - } else { + } else { $sth_uc = $dbh->prepare("UPDATE $current SET TIMESTAMP=?, TYPE=?, EVENT=?, VALUE=?, UNIT=? WHERE (DEVICE=?) AND (READING=?)"); } @@ -2174,15 +2210,15 @@ sub DbLog_execmemcache ($) { my (@row_array,$memcount,$dbh); RemoveInternalTimer($hash, "DbLog_execmemcache"); - + if($init_done != 1) { InternalTimer(gettimeofday()+5, "DbLog_execmemcache", $hash, 0); - return; + return; } # return wenn "reopen" mit Zeitangabe läuft, oder kein asynchroner Mode oder wenn disabled if(!$async || IsDisabled($name) || $hash->{HELPER}{REOPEN_RUNS}) { - return; + return; } # tote PID's löschen @@ -2199,17 +2235,17 @@ sub DbLog_execmemcache ($) { # bei SQLite Sperrverwaltung Logging wenn andere schreibende Zugriffe laufen if($hash->{MODEL} eq "SQLITE") { if($hash->{HELPER}{DELDAYS_PID}) { - $error = "deleteOldDaysNbl is running - resync at NextSync"; - $dolog = 0; - } + $error = "deleteOldDaysNbl is running - resync at NextSync"; + $dolog = 0; + } if($hash->{HELPER}{REDUCELOG_PID}) { - $error = "reduceLogNbl is running - resync at NextSync"; - $dolog = 0; - } - if($hash->{HELPER}{".RUNNING_PID"}) { - $error = "Commit already running - resync at NextSync"; - $dolog = 0; - } + $error = "reduceLogNbl is running - resync at NextSync"; + $dolog = 0; + } + if($hash->{HELPER}{".RUNNING_PID"}) { + $error = "Commit already running - resync at NextSync"; + $dolog = 0; + } } $memcount = $data{DbLog}{$name}{cache}{memcache}?scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}):0; @@ -2218,35 +2254,35 @@ sub DbLog_execmemcache ($) { } else { readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); } - - if($memcount && $dolog && !$hash->{HELPER}{".RUNNING_PID"}) { + + if($memcount && $dolog && !$hash->{HELPER}{".RUNNING_PID"}) { Log3 $name, 4, "DbLog $name -> 
################################################################"; Log3 $name, 4, "DbLog $name -> ### New database processing cycle - asynchronous ###"; Log3 $name, 4, "DbLog $name -> ################################################################"; - Log3 $name, 4, "DbLog $name -> MemCache contains $memcount entries to process"; - Log3 $name, 4, "DbLog $name -> DbLogType is: $DbLogType"; - - foreach my $key (sort(keys %{$data{DbLog}{$name}{cache}{memcache}})) { + Log3 $name, 4, "DbLog $name -> MemCache contains $memcount entries to process"; + Log3 $name, 4, "DbLog $name -> DbLogType is: $DbLogType"; + + foreach my $key (sort(keys %{$data{DbLog}{$name}{cache}{memcache}})) { Log3 $hash->{NAME}, 5, "DbLog $name -> MemCache contains: ".$data{DbLog}{$name}{cache}{memcache}{$key}; - push(@row_array, delete($data{DbLog}{$name}{cache}{memcache}{$key})); - } + push(@row_array, delete($data{DbLog}{$name}{cache}{memcache}{$key})); + } delete $data{DbLog}{$name}{cache}{memcache}; # sicherheitshalber Memory freigeben: https://perlmaven.com/undef-on-perl-arrays-and-hashes , bzw. 
https://www.effectiveperlprogramming.com/2018/09/undef-a-scalar-to-release-its-memory/ - my $rowlist = join('§', @row_array); - $rowlist = encode_base64($rowlist,""); - $hash->{HELPER}{".RUNNING_PID"} = BlockingCall ( - "DbLog_PushAsync", - "$name|$rowlist", - "DbLog_PushAsyncDone", - $timeout, - "DbLog_PushAsyncAborted", - $hash ); + my $rowlist = join('§', @row_array); + $rowlist = encode_base64($rowlist,""); + $hash->{HELPER}{".RUNNING_PID"} = BlockingCall ( + "DbLog_PushAsync", + "$name|$rowlist", + "DbLog_PushAsyncDone", + $timeout, + "DbLog_PushAsyncAborted", + $hash ); $hash->{HELPER}{".RUNNING_PID"}{loglevel} = 4; Log3 $hash->{NAME}, 5, "DbLog $name -> DbLog_PushAsync called with timeout: $timeout"; } else { if($dolog && $hash->{HELPER}{".RUNNING_PID"}) { - $error = "Commit already running - resync at NextSync"; - } else { + $error = "Commit already running - resync at NextSync"; + } else { CancelDelayedShutdown($name) if($hash->{HELPER}{SHUTDOWNSEQ}); Log3 ($name, 2, "DbLog $name - no data for last database write cycle") if(delete $hash->{HELPER}{SHUTDOWNSEQ}); } @@ -2254,7 +2290,7 @@ sub DbLog_execmemcache ($) { my $nextsync = gettimeofday()+$syncival; my $nsdt = FmtDateTime($nextsync); - + if(AttrVal($name, "syncEvents", undef)) { readingsSingleUpdate($hash, "NextSync", $nsdt. " or if CacheUsage ".$clim." 
reached", 1); } else { @@ -2306,12 +2342,12 @@ sub DbLog_PushAsync(@) { my ($useac,$useta) = DbLog_commitMode($hash); eval { if(!$useac) { - $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, mysql_enable_utf8 => $utf8 }); + $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } elsif($useac == 1) { - $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, mysql_enable_utf8 => $utf8 }); + $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } else { # Server default - $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, mysql_enable_utf8 => $utf8 }); + $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } }; if ($@) { @@ -2360,9 +2396,9 @@ sub DbLog_PushAsync(@) { push(@value, "$a[5]"); push(@unit, "$a[6]"); Log3 $hash->{NAME}, 5, "DbLog $name -> processing event Timestamp: $a[0], Device: $a[1], Type: $a[2], Event: $a[3], Reading: $a[4], Value: $a[5], Unit: $a[6]"; - } + } use warnings; - + if($bi) { ####################### # Bulk-Insert @@ -2394,7 +2430,7 @@ sub DbLog_PushAsync(@) { $a[5] =~ s/\\/\\\\/g; # escape \ with \\ $a[6] =~ s/\\/\\\\/g; # escape \ with \\ $sqlins .= "('$a[0]','$a[1]','$a[2]','$a[3]','$a[4]','$a[5]','$a[6]'),"; - } + } use warnings; chop($sqlins); @@ -2419,9 +2455,9 @@ sub DbLog_PushAsync(@) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ins_hist of $ceti events inserted into table $history".($usepkh?" using PK on columns $pkh":""); } else { if($usepkh) { - Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".$ins_hist." 
of $ceti events inserted into table $history due to PK on columns $pkh"; + Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".$ins_hist." of $ceti events inserted into table $history due to PK on columns $pkh"; } else { - Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".$ins_hist." of $ceti events inserted into table $history"; + Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".$ins_hist." of $ceti events inserted into table $history"; } } eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # Data commit @@ -2440,7 +2476,7 @@ sub DbLog_PushAsync(@) { $errorh = $@; Log3 $hash->{NAME}, 2, "DbLog $name -> Error table $history - $errorh"; $error = encode_base64($errorh,""); - $rowlback = $rowlist if($useta); # nicht gespeicherte Datensätze nur zurück geben wenn Transaktion ein + $rowlback = $rowlist if($useta); # nicht gespeicherte Datensätze nur zurück geben wenn Transaktion ein } } @@ -2450,7 +2486,7 @@ sub DbLog_PushAsync(@) { # Array-Insert wird auch bei Bulk verwendet weil im Bulk-Mode # die nicht upgedateten Sätze nicht identifiziert werden können if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; + eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ic = $dbh->prepare("INSERT OR IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { @@ -2475,7 +2511,7 @@ sub DbLog_PushAsync(@) { $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) 
ON CONFLICT ($pkc) DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, VALUE=EXCLUDED.VALUE, UNIT=EXCLUDED.UNIT"); - } else { + } else { $sth_uc = $dbh->prepare("UPDATE $current SET TIMESTAMP=?, TYPE=?, EVENT=?, VALUE=?, UNIT=? WHERE (DEVICE=?) AND (READING=?)"); } @@ -2622,13 +2658,13 @@ sub DbLog_PushAsync(@) { Log3 $hash->{NAME}, 4, "DbLog $name -> $ceti of $ceti events inserted into table $history".($usepkh?" using PK on columns $pkh":""); } else { if($usepkh) { - Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".($ceti-$nins_hist)." of $ceti events inserted into table history due to PK on columns $pkh"; + Log3 $hash->{NAME}, 3, "DbLog $name -> INFO - ".($ceti-$nins_hist)." of $ceti events inserted into table history due to PK on columns $pkh"; } else { - Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".($ceti-$nins_hist)." of $ceti events inserted into table $history"; + Log3 $hash->{NAME}, 2, "DbLog $name -> WARNING - only ".($ceti-$nins_hist)." 
of $ceti events inserted into table $history"; } s/\|/_ESC_/g for @n2hist; # escape Pipe "|" $rowlist = join('§', @n2hist); - $rowlist = encode_base64($rowlist,""); + $rowlist = encode_base64($rowlist,""); } eval {$dbh->commit() if(!$dbh->{AutoCommit});}; # Data commit if ($@) { @@ -2646,7 +2682,7 @@ sub DbLog_PushAsync(@) { $errorh = $@; Log3 $hash->{NAME}, 2, "DbLog $name -> Error table $history - $errorh"; $error = encode_base64($errorh,""); - $rowlback = $rowlist if($useta); # nicht gespeicherte Datensätze nur zurück geben wenn Transaktion ein + $rowlback = $rowlist if($useta); # nicht gespeicherte Datensätze nur zurück geben wenn Transaktion ein } } @@ -2654,7 +2690,7 @@ sub DbLog_PushAsync(@) { ######################################## # insert current mit/ohne primary key if ($usepkc && $hash->{MODEL} eq 'MYSQL') { - eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; + eval { $sth_ic = $dbh->prepare("INSERT IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; } elsif ($usepkc && $hash->{MODEL} eq 'SQLITE') { eval { $sth_ic = $dbh->prepare("INSERT OR IGNORE INTO $current (TIMESTAMP, DEVICE, TYPE, EVENT, READING, VALUE, UNIT) VALUES (?,?,?,?,?,?,?)"); }; } elsif ($usepkc && $hash->{MODEL} eq 'POSTGRESQL') { @@ -2680,7 +2716,7 @@ sub DbLog_PushAsync(@) { $sth_uc = $dbh->prepare("INSERT INTO $current (TIMESTAMP, TYPE, EVENT, VALUE, UNIT, DEVICE, READING) VALUES (?,?,?,?,?,?,?) ON CONFLICT ($pkc) DO UPDATE SET TIMESTAMP=EXCLUDED.TIMESTAMP, DEVICE=EXCLUDED.DEVICE, TYPE=EXCLUDED.TYPE, EVENT=EXCLUDED.EVENT, READING=EXCLUDED.READING, VALUE=EXCLUDED.VALUE, UNIT=EXCLUDED.UNIT"); - } else { + } else { $sth_uc = $dbh->prepare("UPDATE $current SET TIMESTAMP=?, TYPE=?, EVENT=?, VALUE=?, UNIT=? WHERE (DEVICE=?) 
AND (READING=?)"); } @@ -2799,17 +2835,17 @@ sub DbLog_PushAsyncDone ($) { if($rowlist) { $rowlist = decode_base64($rowlist); my @row_array = split('§', $rowlist); - - #one Transaction + + #one Transaction eval { - foreach my $row (@row_array) { - # Cache & CacheIndex für Events zum asynchronen Schreiben in DB - $data{DbLog}{$name}{cache}{index}++; - my $index = $data{DbLog}{$name}{cache}{index}; - $data{DbLog}{$name}{cache}{memcache}{$index} = $row; - } - $memcount = scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}); - }; + foreach my $row (@row_array) { + # Cache & CacheIndex für Events zum asynchronen Schreiben in DB + $data{DbLog}{$name}{cache}{index}++; + my $index = $data{DbLog}{$name}{cache}{index}; + $data{DbLog}{$name}{cache}{memcache}{$index} = $row; + } + $memcount = scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}); + }; } $memcount = $data{DbLog}{$name}{cache}{memcache}?scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}):0; @@ -2830,9 +2866,9 @@ sub DbLog_PushAsyncDone ($) { if(!$asyncmode) { delete($defs{$name}{READINGS}{NextSync}); - delete($defs{$name}{READINGS}{background_processing_time}); - delete($defs{$name}{READINGS}{sql_processing_time}); - delete($defs{$name}{READINGS}{CacheUsage}); + delete($defs{$name}{READINGS}{background_processing_time}); + delete($defs{$name}{READINGS}{sql_processing_time}); + delete($defs{$name}{READINGS}{CacheUsage}); } delete $hash->{HELPER}{".RUNNING_PID"}; delete $hash->{HELPER}{LASTLIMITRUNTIME} if(!$error); @@ -2935,13 +2971,13 @@ sub DbLog_readCfg($){ $hash->{MODEL}="unknown"; Log3 $hash->{NAME}, 1, "Unknown database model found in configuration file $configfilename."; Log3 $hash->{NAME}, 1, "Only MySQL/MariaDB, PostgreSQL, Oracle, SQLite are fully supported."; - return "unknown database type"; + return "unknown database type"; } if($hash->{MODEL} eq "MYSQL") { - $hash->{UTF8} = defined($dbconfig{utf8})?$dbconfig{utf8}:0; + $hash->{UTF8} = defined($dbconfig{utf8})?$dbconfig{utf8}:0; } - + return; } @@ 
-2959,7 +2995,7 @@ sub DbLog_ConnectPush($;$$) { if($init_done != 1) { InternalTimer(gettimeofday()+5, "DbLog_ConnectPush", $hash, 0); - return; + return; } Log3 $hash->{NAME}, 3, "DbLog $name - Creating Push-Handle to database $dbconn with user $dbuser" if(!$get); @@ -2967,18 +3003,18 @@ sub DbLog_ConnectPush($;$$) { my ($useac,$useta) = DbLog_commitMode($hash); eval { if(!$useac) { - $dbhp = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, mysql_enable_utf8 => $utf8 }); + $dbhp = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } elsif($useac == 1) { - $dbhp = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, mysql_enable_utf8 => $utf8 }); + $dbhp = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } else { # Server default - $dbhp = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, mysql_enable_utf8 => $utf8 }); + $dbhp = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } }; if($@) { $err = $@; - Log3 $hash->{NAME}, 2, "DbLog $name - Error: $@"; + Log3 $hash->{NAME}, 2, "DbLog $name - Error: $@"; } if(!$dbhp) { @@ -3002,7 +3038,7 @@ sub DbLog_ConnectPush($;$$) { Log3 $hash->{NAME}, 3, "DbLog $name - UTF8 support enabled" if($utf8 && $hash->{MODEL} eq "MYSQL" && !$get); if(!$get) { $state = "connected"; - $evt = ($state eq $hash->{HELPER}{OLDSTATE})?0:1; + $evt = ($state eq $hash->{HELPER}{OLDSTATE})?0:1; readingsSingleUpdate($hash, "state", $state, $evt); $hash->{HELPER}{OLDSTATE} = $state; } @@ -3034,12 +3070,12 @@ sub DbLog_ConnectNewDBH($) { my ($useac,$useta) = DbLog_commitMode($hash); eval { if(!$useac) { - 
$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, mysql_enable_utf8 => $utf8 }); + $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } elsif($useac == 1) { - $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, mysql_enable_utf8 => $utf8 }); + $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } else { # Server default - $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, mysql_enable_utf8 => $utf8 }); + $dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 }); } }; @@ -3087,7 +3123,7 @@ sub DbLog_ExecSQL($$) { $sth = DbLog_ExecSQL1($hash,$dbh,$sql); if(!$sth) { Log3($name, 2, "DbLog $name - Backdoor retry failed"); - $dbh->disconnect(); + $dbh->disconnect(); return 0; } Log3($name, 2, "DbLog $name - Backdoor retry ok"); @@ -3230,7 +3266,7 @@ sub DbLog_Get($@) { my $nh = ($hash->{MODEL} ne 'SQLITE')?1:0; if ($nh || $hash->{PID} != $$) { # 17.04.2019 Forum: https://forum.fhem.de/index.php/topic,99719.0.html $dbh = DbLog_ConnectNewDBH($hash); - return "Can't connect to database." if(!$dbh); + return "Can't connect to database." if(!$dbh); } else { $dbh = $hash->{DBHP}; @@ -3242,7 +3278,7 @@ sub DbLog_Get($@) { }; if ($@) { Log3($name, 1, "DbLog $name: DBLog_Push - DB Session dead! 
- $@"); - return $@; + return $@; } else { $dbh = $hash->{DBHP}; } @@ -3320,17 +3356,17 @@ sub DbLog_Get($@) { if($deltacalc) { # delta-h und delta-d , geändert V4.8.0 / 14.10.2019 $stm = "SELECT Z.TIMESTAMP, Z.DEVICE, Z.READING, Z.VALUE from "; - - $stm .= "(SELECT $sqlspec{get_timestamp} AS TIMESTAMP, + + $stm .= "(SELECT $sqlspec{get_timestamp} AS TIMESTAMP, DEVICE AS DEVICE, READING AS READING, VALUE AS VALUE "; - + $stm .= "FROM $current " if($inf eq "current"); $stm .= "FROM $history " if($inf eq "history"); $stm .= "WHERE 1=1 "; - + $stm .= "AND DEVICE = '".$readings[$i]->[0]."' " if ($readings[$i]->[0] !~ m(\%)); $stm .= "AND DEVICE LIKE '".$readings[$i]->[0]."' " if(($readings[$i]->[0] !~ m(^\%$)) && ($readings[$i]->[0] =~ m(\%))); @@ -3744,14 +3780,29 @@ sub DbLog_configcheck($) { my $dbi = $DBI::VERSION; # DBI Version my %drivers = DBI->installed_drivers(); my $dv = ""; - if($dbmodel =~ /MYSQL/i) { + if($dbmodel =~ /MYSQL/xi) { for (keys %drivers) { - $dv = $_ if($_ =~ /mysql|mariadb/); + $dv = $_ if($_ =~ /mysql|mariadb/x); + } + } + my $dbd = ($dbmodel =~ /POSTGRESQL/xi) ? "Pg: ".$DBD::Pg::VERSION: # DBD Version + ($dbmodel =~ /MYSQL/xi && $dv) ? "$dv: ".$DBD::mysql::VERSION: + ($dbmodel =~ /SQLITE/xi) ? "SQLite: ".$DBD::SQLite::VERSION:"Undefined"; + + my $dbdhint = ""; + my $dbdupd = 0; + if($dbmodel =~ /MYSQL/xi && $dv) { # check DBD Mindest- und empfohlene Version + my $dbdver = $DBD::mysql::VERSION * 1; # String to Zahl Konversion + if($dbdver < 4.032) { + $dbdhint = "Caution: Your DBD version doesn't support UTF8. "; + $dbdupd = 1; + } elsif ($dbdver < 4.042) { + $dbdhint = "Caution: Full UTF-8 support exists from DBD version 4.032, but installing DBD version 4.042 is highly suggested. 
"; + $dbdupd = 1; + } else { + $dbdhint = "Your DBD version fulfills UTF8 support, no need to update DBD."; } } - my $dbd = ($dbmodel =~ /POSTGRESQL/i)?"Pg: ".$DBD::Pg::VERSION: # DBD Version - ($dbmodel =~ /MYSQL/i && $dv)?"$dv: ".$DBD::mysql::VERSION: - ($dbmodel =~ /SQLITE/i)?"SQLite: ".$DBD::SQLite::VERSION:"Undefined"; my ($errcm,$supd,$uptb) = DbLog_checkModVer($name); # DbLog Version @@ -3761,14 +3812,14 @@ sub DbLog_configcheck($) { $check .= "Used DBI (Database independent interface) version: $dbi
"; $check .= "Used DBD (Database driver) version $dbd
"; if($errcm) { - $check .= "Recommendation: ERROR - $errcm

"; + $check .= "Recommendation: ERROR - $errcm. $dbdhint

"; } if($supd) { $check .= "Used DbLog version: $hash->{HELPER}{VERSION}.
$uptb
"; - $check .= "Recommendation: You should update FHEM to get the recent DbLog version from repository !

"; + $check .= "Recommendation: You should update FHEM to get the recent DbLog version from repository ! $dbdhint

"; } else { $check .= "Used DbLog version: $hash->{HELPER}{VERSION}.
$uptb
"; - $check .= "Recommendation: No update of DbLog is needed.

"; + $check .= "Recommendation: No update of DbLog is needed. $dbdhint

"; } ### Configuration read check @@ -3797,21 +3848,29 @@ sub DbLog_configcheck($) { my ($chutf8mod,$chutf8dat); if($dbmodel =~ /MYSQL/) { @ce = DbLog_sqlget($hash,"SHOW VARIABLES LIKE 'character_set_connection'"); - $chutf8mod = @ce?uc($ce[1]):"no result"; - @se = DbLog_sqlget($hash,"SHOW VARIABLES LIKE 'character_set_database'"); - $chutf8dat = @se?uc($se[1]):"no result"; - if($chutf8mod eq $chutf8dat) { + $chutf8mod = @ce ? uc($ce[1]) : "no result"; + @se = DbLog_sqlget($hash,"SHOW VARIABLES LIKE 'character_set_database'"); + $chutf8dat = @se ? uc($se[1]) : "no result"; + + if($chutf8mod eq $chutf8dat) { $rec = "settings o.k."; } else { $rec = "Both encodings should be identical. You can adjust the usage of UTF8 connection by setting the UTF8 parameter in file '$hash->{CONFIGURATION}' to the right value. "; } + + if(uc($chutf8mod) ne "UTF8" && uc($chutf8dat) ne "UTF8") { + $dbdhint = ""; + } else { + $dbdhint .= " If you want use UTF8 database option, you must update DBD (Database driver) to at least version 4.032. " if($dbdupd); + } + } if($dbmodel =~ /POSTGRESQL/) { @ce = DbLog_sqlget($hash,"SHOW CLIENT_ENCODING"); - $chutf8mod = @ce?uc($ce[0]):"no result"; - @se = DbLog_sqlget($hash,"select character_set_name from information_schema.character_sets"); - $chutf8dat = @se?uc($se[0]):"no result"; - if($chutf8mod eq $chutf8dat) { + $chutf8mod = @ce ? uc($ce[0]) : "no result"; + @se = DbLog_sqlget($hash,"select character_set_name from information_schema.character_sets"); + $chutf8dat = @se ? uc($se[0]) : "no result"; + if($chutf8mod eq $chutf8dat) { $rec = "settings o.k."; } else { $rec = "This is only an information. PostgreSQL supports automatic character set conversion between server and client for certain character set combinations. The conversion information is stored in the pg_conversion system catalog. 
PostgreSQL comes with some predefined conversions."; @@ -3819,8 +3878,8 @@ sub DbLog_configcheck($) { } if($dbmodel =~ /SQLITE/) { @ce = DbLog_sqlget($hash,"PRAGMA encoding"); - $chutf8dat = @ce?uc($ce[0]):"no result"; - @se = DbLog_sqlget($hash,"PRAGMA table_info($history)"); + $chutf8dat = @ce ? uc($ce[0]) : "no result"; + @se = DbLog_sqlget($hash,"PRAGMA table_info($history)"); $rec = "This is only an information about text encoding used by the main database."; } @@ -3834,13 +3893,13 @@ sub DbLog_configcheck($) { if(!@ce || !@se) { $check .= "Connection to database was not successful.
"; $check .= "Recommendation: Plese check logfile for further information.

"; - $check .= ""; + $check .= ""; return $check; } $check .= "Result of encoding check

"; $check .= "Encoding used by Client (connection): $chutf8mod
" if($dbmodel !~ /SQLITE/); $check .= "Encoding used by DB $dbname: $chutf8dat
"; - $check .= "Recommendation: $rec

"; + $check .= "Recommendation: $rec $dbdhint

"; ### Check Betriebsmodus ####################################################################### @@ -3854,15 +3913,15 @@ sub DbLog_configcheck($) { if($mode =~ /asynchronous/) { my $max = AttrVal("global", "blockingCallMax", 0); if(!$max || $max >= 6) { - $rec = "settings o.k."; - } else { - $rec = "WARNING - you are running asynchronous mode that is recommended, but the value of global device attribute \"blockingCallMax\" is set quite small.
"; - $rec .= "This may cause problems in operation. It is recommended to increase the global blockingCallMax attribute."; - } + $rec = "settings o.k."; + } else { + $rec = "WARNING - you are running asynchronous mode that is recommended, but the value of global device attribute \"blockingCallMax\" is set quite small.
"; + $rec .= "This may cause problems in operation. It is recommended to increase the global blockingCallMax attribute."; + } } else { $rec = "Switch $name to the asynchronous logmode by setting the 'asyncMode' attribute. The advantage of this mode is to log events non-blocking.
"; - $rec .= "There are attributes 'syncInterval' and 'cacheLimit' relevant for this working mode.
"; - $rec .= "Please refer to commandref for further information about these attributes."; + $rec .= "There are attributes 'syncInterval' and 'cacheLimit' relevant for this working mode.
"; + $rec .= "Please refer to commandref for further information about these attributes."; } $check .= "Recommendation: $rec

"; @@ -3882,47 +3941,47 @@ sub DbLog_configcheck($) { ### Check Plot Erstellungsmodus ####################################################################### $check .= "Result of plot generation method check

"; - my @webdvs = devspec2array("TYPE=FHEMWEB:FILTER=STATE=Initialized"); - my ($forks,$emb) = (1,1); - my $wall = ""; + my @webdvs = devspec2array("TYPE=FHEMWEB:FILTER=STATE=Initialized"); + my ($forks,$emb) = (1,1); + my $wall = ""; foreach (@webdvs) { - my $web = $_; - my $pf = AttrVal($web,"plotfork",0); - my $pe = AttrVal($web,"plotEmbed",0); - $forks = 0 if(!$pf); - $emb = 0 if($pe =~ /[01]/); - if(!$pf || $pe =~ /[01]/) { - $wall .= "".$web.": plotfork=".$pf." / plotEmbed=".$pe."
"; - } else { - $wall .= $web.": plotfork=".$pf." / plotEmbed=".$pe."
"; - } - } + my $web = $_; + my $pf = AttrVal($web,"plotfork",0); + my $pe = AttrVal($web,"plotEmbed",0); + $forks = 0 if(!$pf); + $emb = 0 if($pe =~ /[01]/); + if(!$pf || $pe =~ /[01]/) { + $wall .= "".$web.": plotfork=".$pf." / plotEmbed=".$pe."
"; + } else { + $wall .= $web.": plotfork=".$pf." / plotEmbed=".$pe."
"; + } + } if(!$forks || !$emb) { - $check .= "WARNING - at least one of your FHEMWEB devices has attribute \"plotfork = 1\" and/or attribute \"plotEmbed = 2\" not set.

"; - $check .= $wall; - $rec = "You should set attribute \"plotfork = 1\" and \"plotEmbed = 2\" in relevant devices. ". + $check .= "WARNING - at least one of your FHEMWEB devices has attribute \"plotfork = 1\" and/or attribute \"plotEmbed = 2\" not set.

"; + $check .= $wall; + $rec = "You should set attribute \"plotfork = 1\" and \"plotEmbed = 2\" in relevant devices. ". "If these attributes are not set, blocking situations may occure when creating plots. ". - "Note: Your system must have sufficient memory to handle parallel running Perl processes. See also global attribute \"blockingCallMax\".
" - } else { - $check .= $wall; - $rec = "settings o.k."; - } - $check .= "
"; - $check .= "Recommendation: $rec

"; + "Note: Your system must have sufficient memory to handle parallel running Perl processes. See also global attribute \"blockingCallMax\".
" + } else { + $check .= $wall; + $rec = "settings o.k."; + } + $check .= "
"; + $check .= "Recommendation: $rec

"; ### Check Spaltenbreite history ####################################################################### my (@sr_dev,@sr_typ,@sr_evt,@sr_rdg,@sr_val,@sr_unt); my ($cdat_dev,$cdat_typ,$cdat_evt,$cdat_rdg,$cdat_val,$cdat_unt); my ($cmod_dev,$cmod_typ,$cmod_evt,$cmod_rdg,$cmod_val,$cmod_unt); - + if($dbmodel =~ /MYSQL/) { @sr_dev = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='DEVICE'"); - @sr_typ = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='TYPE'"); - @sr_evt = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='EVENT'"); - @sr_rdg = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='READING'"); - @sr_val = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='VALUE'"); - @sr_unt = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='UNIT'"); + @sr_typ = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='TYPE'"); + @sr_evt = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='EVENT'"); + @sr_rdg = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='READING'"); + @sr_val = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='VALUE'"); + @sr_unt = DbLog_sqlget($hash,"SHOW FIELDS FROM $history where FIELD='UNIT'"); } if($dbmodel =~ /POSTGRESQL/) { my $sch = AttrVal($name, "dbSchema", ""); @@ -3946,14 +4005,14 @@ sub DbLog_configcheck($) { } if($dbmodel =~ /SQLITE/) { my $dev = (DbLog_sqlget($hash,"SELECT sql FROM sqlite_master WHERE name = '$history'"))[0]; - $cdat_dev = $dev?$dev:"no result"; - $cdat_typ = $cdat_evt = $cdat_rdg = $cdat_val = $cdat_unt = $cdat_dev; - $cdat_dev =~ s/.*DEVICE.varchar\(([\d]*)\).*/$1/e; - $cdat_typ =~ s/.*TYPE.varchar\(([\d]*)\).*/$1/e; - $cdat_evt =~ s/.*EVENT.varchar\(([\d]*)\).*/$1/e; - $cdat_rdg =~ s/.*READING.varchar\(([\d]*)\).*/$1/e; - $cdat_val =~ s/.*VALUE.varchar\(([\d]*)\).*/$1/e; - $cdat_unt =~ s/.*UNIT.varchar\(([\d]*)\).*/$1/e; + $cdat_dev = $dev?$dev:"no result"; + $cdat_typ = $cdat_evt = $cdat_rdg = $cdat_val = $cdat_unt = $cdat_dev; + 
$cdat_dev =~ s/.*DEVICE.varchar\(([\d]*)\).*/$1/e; + $cdat_typ =~ s/.*TYPE.varchar\(([\d]*)\).*/$1/e; + $cdat_evt =~ s/.*EVENT.varchar\(([\d]*)\).*/$1/e; + $cdat_rdg =~ s/.*READING.varchar\(([\d]*)\).*/$1/e; + $cdat_val =~ s/.*VALUE.varchar\(([\d]*)\).*/$1/e; + $cdat_unt =~ s/.*UNIT.varchar\(([\d]*)\).*/$1/e; } if ($dbmodel !~ /SQLITE/) { $cdat_dev = @sr_dev?($sr_dev[1]):"no result"; @@ -3981,22 +4040,22 @@ sub DbLog_configcheck($) { } else { if ($dbmodel !~ /SQLITE/) { $rec = "The relation between column width in table $history and the field width used in device $name don't meet the requirements. "; - $rec .= "Please make sure that the width of database field definition is equal or larger than the field width used by the module. Compare the given results.
"; - $rec .= "Currently the default values for field width are:

"; - $rec .= "DEVICE: $columns{DEVICE}
"; - $rec .= "TYPE: $columns{TYPE}
"; - $rec .= "EVENT: $columns{EVENT}
"; - $rec .= "READING: $columns{READING}
"; - $rec .= "VALUE: $columns{VALUE}
"; - $rec .= "UNIT: $columns{UNIT}

"; + $rec .= "Please make sure that the width of database field definition is equal or larger than the field width used by the module. Compare the given results.
"; + $rec .= "Currently the default values for field width are:

"; + $rec .= "DEVICE: $columns{DEVICE}
"; + $rec .= "TYPE: $columns{TYPE}
"; + $rec .= "EVENT: $columns{EVENT}
"; + $rec .= "READING: $columns{READING}
"; + $rec .= "VALUE: $columns{VALUE}
"; + $rec .= "UNIT: $columns{UNIT}

"; $rec .= "You can change the column width in database by a statement like 'alter table $history modify VALUE varchar(128);' (example for changing field 'VALUE'). "; $rec .= "You can do it for example by executing 'sqlCmd' in DbRep or in a SQL-Editor of your choice. (switch $name to asynchron mode for non-blocking).
"; - $rec .= "Alternatively the field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; + $rec .= "Alternatively the field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; } else { - $rec = "WARNING - The relation between column width in table $history and the field width used by device $name should be equal but it differs."; - $rec .= "The field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; - $rec .= "Because you use SQLite this is only a warning. Normally the database can handle these differences. "; - } + $rec = "WARNING - The relation between column width in table $history and the field width used by device $name should be equal but it differs."; + $rec .= "The field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; + $rec .= "Because you use SQLite this is only a warning. Normally the database can handle these differences. "; + } } $check .= "Result of table '$history' check

"; @@ -4008,11 +4067,11 @@ sub DbLog_configcheck($) { ####################################################################### if($dbmodel =~ /MYSQL/) { @sr_dev = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='DEVICE'"); - @sr_typ = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='TYPE'"); - @sr_evt = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='EVENT'"); - @sr_rdg = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='READING'"); - @sr_val = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='VALUE'"); - @sr_unt = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='UNIT'"); + @sr_typ = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='TYPE'"); + @sr_evt = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='EVENT'"); + @sr_rdg = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='READING'"); + @sr_val = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='VALUE'"); + @sr_unt = DbLog_sqlget($hash,"SHOW FIELDS FROM $current where FIELD='UNIT'"); } if($dbmodel =~ /POSTGRESQL/) { @@ -4037,14 +4096,14 @@ sub DbLog_configcheck($) { } if($dbmodel =~ /SQLITE/) { my $dev = (DbLog_sqlget($hash,"SELECT sql FROM sqlite_master WHERE name = '$current'"))[0]; - $cdat_dev = $dev?$dev:"no result"; - $cdat_typ = $cdat_evt = $cdat_rdg = $cdat_val = $cdat_unt = $cdat_dev; - $cdat_dev =~ s/.*DEVICE.varchar\(([\d]*)\).*/$1/e; - $cdat_typ =~ s/.*TYPE.varchar\(([\d]*)\).*/$1/e; - $cdat_evt =~ s/.*EVENT.varchar\(([\d]*)\).*/$1/e; - $cdat_rdg =~ s/.*READING.varchar\(([\d]*)\).*/$1/e; - $cdat_val =~ s/.*VALUE.varchar\(([\d]*)\).*/$1/e; - $cdat_unt =~ s/.*UNIT.varchar\(([\d]*)\).*/$1/e; + $cdat_dev = $dev?$dev:"no result"; + $cdat_typ = $cdat_evt = $cdat_rdg = $cdat_val = $cdat_unt = $cdat_dev; + $cdat_dev =~ s/.*DEVICE.varchar\(([\d]*)\).*/$1/e; + $cdat_typ =~ s/.*TYPE.varchar\(([\d]*)\).*/$1/e; + $cdat_evt =~ s/.*EVENT.varchar\(([\d]*)\).*/$1/e; + $cdat_rdg =~ s/.*READING.varchar\(([\d]*)\).*/$1/e; + 
$cdat_val =~ s/.*VALUE.varchar\(([\d]*)\).*/$1/e; + $cdat_unt =~ s/.*UNIT.varchar\(([\d]*)\).*/$1/e; } if ($dbmodel !~ /SQLITE/) { $cdat_dev = @sr_dev?($sr_dev[1]):"no result"; @@ -4070,25 +4129,25 @@ sub DbLog_configcheck($) { if($cdat_dev >= $cmod_dev && $cdat_typ >= $cmod_typ && $cdat_evt >= $cmod_evt && $cdat_rdg >= $cmod_rdg && $cdat_val >= $cmod_val && $cdat_unt >= $cmod_unt) { $rec = "settings o.k."; } else { - if ($dbmodel !~ /SQLITE/) { + if ($dbmodel !~ /SQLITE/) { $rec = "The relation between column width in table $current and the field width used in device $name don't meet the requirements. "; - $rec .= "Please make sure that the width of database field definition is equal or larger than the field width used by the module. Compare the given results.
"; - $rec .= "Currently the default values for field width are:

"; - $rec .= "DEVICE: $columns{DEVICE}
"; - $rec .= "TYPE: $columns{TYPE}
"; - $rec .= "EVENT: $columns{EVENT}
"; - $rec .= "READING: $columns{READING}
"; - $rec .= "VALUE: $columns{VALUE}
"; - $rec .= "UNIT: $columns{UNIT}

"; + $rec .= "Please make sure that the width of database field definition is equal or larger than the field width used by the module. Compare the given results.
"; + $rec .= "Currently the default values for field width are:

"; + $rec .= "DEVICE: $columns{DEVICE}
"; + $rec .= "TYPE: $columns{TYPE}
"; + $rec .= "EVENT: $columns{EVENT}
"; + $rec .= "READING: $columns{READING}
"; + $rec .= "VALUE: $columns{VALUE}
"; + $rec .= "UNIT: $columns{UNIT}

"; $rec .= "You can change the column width in database by a statement like 'alter table $current modify VALUE varchar(128);' (example for changing field 'VALUE'). "; $rec .= "You can do it for example by executing 'sqlCmd' in DbRep or in a SQL-Editor of your choice. (switch $name to asynchron mode for non-blocking).
"; - $rec .= "Alternatively the field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; + $rec .= "Alternatively the field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; } else { - $rec = "WARNING - The relation between column width in table $current and the field width used by device $name should be equal but it differs. "; - $rec .= "The field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; - $rec .= "Because you use SQLite this is only a warning. Normally the database can handle these differences. "; - } - } + $rec = "WARNING - The relation between column width in table $current and the field width used by device $name should be equal but it differs. "; + $rec .= "The field width used by $name can be adjusted by setting attributes 'colEvent', 'colReading', 'colValue'. (pls. refer to commandref)"; + $rec .= "Because you use SQLite this is only a warning. Normally the database can handle these differences. "; + } + } $check .= "Result of table '$current' check

"; $check .= "Column width set in DB $current: 'DEVICE' = $cdat_dev, 'TYPE' = $cdat_typ, 'EVENT' = $cdat_evt, 'READING' = $cdat_rdg, 'VALUE' = $cdat_val, 'UNIT' = $cdat_unt
"; @@ -4101,16 +4160,16 @@ sub DbLog_configcheck($) { my (@six,@six_dev,@six_rdg,@six_tsp); my ($idef,$idef_dev,$idef_rdg,$idef_tsp); $check .= "Result of check 'Search_Idx' availability

"; - + if($dbmodel =~ /MYSQL/) { @six = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Search_Idx'"); - if (!@six) { - $check .= "The index 'Search_Idx' is missing.
"; - $rec = "You can create the index by executing statement 'CREATE INDEX Search_Idx ON `$history` (DEVICE, READING, TIMESTAMP) USING BTREE;'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; - $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Search_Idx' as well !
"; - } else { + if (!@six) { + $check .= "The index 'Search_Idx' is missing.
"; + $rec = "You can create the index by executing statement 'CREATE INDEX Search_Idx ON `$history` (DEVICE, READING, TIMESTAMP) USING BTREE;'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; + $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Search_Idx' as well !
"; + } else { @six_dev = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Search_Idx' and Column_name='DEVICE'"); @six_rdg = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Search_Idx' and Column_name='READING'"); @six_tsp = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Search_Idx' and Column_name='TIMESTAMP'"); @@ -4118,69 +4177,69 @@ sub DbLog_configcheck($) { $check .= "Index 'Search_Idx' exists and contains recommended fields 'DEVICE', 'TIMESTAMP', 'READING'.
"; $rec = "settings o.k."; } else { - $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'DEVICE'.
" if (!@six_dev); - $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'READING'.
" if (!@six_rdg); - $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!@six_tsp); - $rec = "The index should contain the fields 'DEVICE', 'TIMESTAMP', 'READING'. "; - $rec .= "You can change the index by executing e.g.
"; - $rec .= "'ALTER TABLE `$history` DROP INDEX `Search_Idx`, ADD INDEX `Search_Idx` (`DEVICE`, `READING`, `TIMESTAMP`) USING BTREE;'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - } - } + $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'DEVICE'.
" if (!@six_dev); + $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'READING'.
" if (!@six_rdg); + $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!@six_tsp); + $rec = "The index should contain the fields 'DEVICE', 'TIMESTAMP', 'READING'. "; + $rec .= "You can change the index by executing e.g.
"; + $rec .= "'ALTER TABLE `$history` DROP INDEX `Search_Idx`, ADD INDEX `Search_Idx` (`DEVICE`, `READING`, `TIMESTAMP`) USING BTREE;'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + } + } } if($dbmodel =~ /POSTGRESQL/) { @six = DbLog_sqlget($hash,"SELECT * FROM pg_indexes WHERE tablename='$history' and indexname ='Search_Idx'"); - if (!@six) { - $check .= "The index 'Search_Idx' is missing.
"; - $rec = "You can create the index by executing statement 'CREATE INDEX \"Search_Idx\" ON $history USING btree (device, reading, \"timestamp\")'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; + if (!@six) { + $check .= "The index 'Search_Idx' is missing.
"; + $rec = "You can create the index by executing statement 'CREATE INDEX \"Search_Idx\" ON $history USING btree (device, reading, \"timestamp\")'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Search_Idx' as well !
"; - } else { + } else { $idef = $six[4]; - $idef_dev = 1 if($idef =~ /device/); - $idef_rdg = 1 if($idef =~ /reading/); - $idef_tsp = 1 if($idef =~ /timestamp/); + $idef_dev = 1 if($idef =~ /device/); + $idef_rdg = 1 if($idef =~ /reading/); + $idef_tsp = 1 if($idef =~ /timestamp/); if ($idef_dev && $idef_rdg && $idef_tsp) { $check .= "Index 'Search_Idx' exists and contains recommended fields 'DEVICE', 'READING', 'TIMESTAMP'.
"; $rec = "settings o.k."; } else { - $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'DEVICE'.
" if (!$idef_dev); - $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$idef_rdg); - $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$idef_tsp); - $rec = "The index should contain the fields 'DEVICE', 'READING', 'TIMESTAMP'. "; - $rec .= "You can change the index by executing e.g.
"; - $rec .= "'DROP INDEX \"Search_Idx\"; CREATE INDEX \"Search_Idx\" ON $history USING btree (device, reading, \"timestamp\")'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - } - } + $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'DEVICE'.
" if (!$idef_dev); + $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$idef_rdg); + $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$idef_tsp); + $rec = "The index should contain the fields 'DEVICE', 'READING', 'TIMESTAMP'. "; + $rec .= "You can change the index by executing e.g.
"; + $rec .= "'DROP INDEX \"Search_Idx\"; CREATE INDEX \"Search_Idx\" ON $history USING btree (device, reading, \"timestamp\")'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + } + } } if($dbmodel =~ /SQLITE/) { @six = DbLog_sqlget($hash,"SELECT name,sql FROM sqlite_master WHERE type='index' AND name='Search_Idx'"); - if (!$six[0]) { - $check .= "The index 'Search_Idx' is missing.
"; - $rec = "You can create the index by executing statement 'CREATE INDEX Search_Idx ON `$history` (DEVICE, READING, TIMESTAMP)'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; + if (!$six[0]) { + $check .= "The index 'Search_Idx' is missing.
"; + $rec = "You can create the index by executing statement 'CREATE INDEX Search_Idx ON `$history` (DEVICE, READING, TIMESTAMP)'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Search_Idx' as well !
"; - } else { + } else { $idef = $six[1]; - $idef_dev = 1 if(lc($idef) =~ /device/); - $idef_rdg = 1 if(lc($idef) =~ /reading/); - $idef_tsp = 1 if(lc($idef) =~ /timestamp/); + $idef_dev = 1 if(lc($idef) =~ /device/); + $idef_rdg = 1 if(lc($idef) =~ /reading/); + $idef_tsp = 1 if(lc($idef) =~ /timestamp/); if ($idef_dev && $idef_rdg && $idef_tsp) { $check .= "Index 'Search_Idx' exists and contains recommended fields 'DEVICE', 'READING', 'TIMESTAMP'.
"; $rec = "settings o.k."; } else { - $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'DEVICE'.
" if (!$idef_dev); - $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$idef_rdg); - $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$idef_tsp); - $rec = "The index should contain the fields 'DEVICE', 'READING', 'TIMESTAMP'. "; - $rec .= "You can change the index by executing e.g.
"; - $rec .= "'DROP INDEX \"Search_Idx\"; CREATE INDEX Search_Idx ON `$history` (DEVICE, READING, TIMESTAMP)'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - } - } + $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'DEVICE'.
" if (!$idef_dev); + $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$idef_rdg); + $check .= "Index 'Search_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$idef_tsp); + $rec = "The index should contain the fields 'DEVICE', 'READING', 'TIMESTAMP'. "; + $rec .= "You can change the index by executing e.g.
"; + $rec .= "'DROP INDEX \"Search_Idx\"; CREATE INDEX Search_Idx ON `$history` (DEVICE, READING, TIMESTAMP)'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + } + } } $check .= "Recommendation: $rec

"; @@ -4197,90 +4256,90 @@ sub DbLog_configcheck($) { $dbrp = $_; if(!$defs{$dbrp}) { Log3 ($name, 2, "DbLog $name -> Device '$dbrp' found by configCheck doesn't exist !"); - next; + next; } - if ($defs{$dbrp}->{DEF} eq $name) { - # DbRep Device verwendet aktuelles DbLog-Device + if ($defs{$dbrp}->{DEF} eq $name) { + # DbRep Device verwendet aktuelles DbLog-Device Log3 ($name, 5, "DbLog $name -> DbRep-Device '$dbrp' uses $name."); - $isused = 1; + $isused = 1; } } if ($isused) { - if($dbmodel =~ /MYSQL/) { + if($dbmodel =~ /MYSQL/) { @dix = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Report_Idx'"); - if (!@dix) { - $check .= "At least one DbRep-device assigned to $name is used, but the recommended index 'Report_Idx' is missing.
"; - $rec = "You can create the index by executing statement 'CREATE INDEX Report_Idx ON `$history` (TIMESTAMP,READING) USING BTREE;'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; - $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Report_Idx' as well !
"; - } else { + if (!@dix) { + $check .= "At least one DbRep-device assigned to $name is used, but the recommended index 'Report_Idx' is missing.
"; + $rec = "You can create the index by executing statement 'CREATE INDEX Report_Idx ON `$history` (TIMESTAMP,READING) USING BTREE;'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; + $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Report_Idx' as well !
"; + } else { @dix_rdg = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Report_Idx' and Column_name='READING'"); @dix_tsp = DbLog_sqlget($hash,"SHOW INDEX FROM $history where Key_name='Report_Idx' and Column_name='TIMESTAMP'"); if (@dix_rdg && @dix_tsp) { - $check .= "At least one DbRep-device assigned to $name is used. "; + $check .= "At least one DbRep-device assigned to $name is used. "; $check .= "Index 'Report_Idx' exists and contains recommended fields 'TIMESTAMP', 'READING'.
"; $rec = "settings o.k."; } else { - $check .= "You use at least one DbRep-device assigned to $name. "; - $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'READING'.
" if (!@dix_rdg); - $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!@dix_tsp); - $rec = "The index should contain the fields 'TIMESTAMP', 'READING'. "; - $rec .= "You can change the index by executing e.g.
"; - $rec .= "'ALTER TABLE `$history` DROP INDEX `Report_Idx`, ADD INDEX `Report_Idx` (`TIMESTAMP`, `READING`) USING BTREE'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - } - } + $check .= "You use at least one DbRep-device assigned to $name. "; + $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'READING'.
" if (!@dix_rdg); + $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!@dix_tsp); + $rec = "The index should contain the fields 'TIMESTAMP', 'READING'. "; + $rec .= "You can change the index by executing e.g.
"; + $rec .= "'ALTER TABLE `$history` DROP INDEX `Report_Idx`, ADD INDEX `Report_Idx` (`TIMESTAMP`, `READING`) USING BTREE'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + } + } } - if($dbmodel =~ /POSTGRESQL/) { + if($dbmodel =~ /POSTGRESQL/) { @dix = DbLog_sqlget($hash,"SELECT * FROM pg_indexes WHERE tablename='$history' and indexname ='Report_Idx'"); - if (!@dix) { - $check .= "You use at least one DbRep-device assigned to $name, but the recommended index 'Report_Idx' is missing.
"; - $rec = "You can create the index by executing statement 'CREATE INDEX \"Report_Idx\" ON $history USING btree (\"timestamp\", reading)'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; - $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Report_Idx' as well !
"; - } else { + if (!@dix) { + $check .= "You use at least one DbRep-device assigned to $name, but the recommended index 'Report_Idx' is missing.
"; + $rec = "You can create the index by executing statement 'CREATE INDEX \"Report_Idx\" ON $history USING btree (\"timestamp\", reading)'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; + $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Report_Idx' as well !
"; + } else { $irep = $dix[4]; - $irep_rdg = 1 if($irep =~ /reading/); - $irep_tsp = 1 if($irep =~ /timestamp/); + $irep_rdg = 1 if($irep =~ /reading/); + $irep_tsp = 1 if($irep =~ /timestamp/); if ($irep_rdg && $irep_tsp) { $check .= "Index 'Report_Idx' exists and contains recommended fields 'TIMESTAMP', 'READING'.
"; $rec = "settings o.k."; } else { - $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$irep_rdg); - $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$irep_tsp); - $rec = "The index should contain the fields 'TIMESTAMP', 'READING'. "; - $rec .= "You can change the index by executing e.g.
"; - $rec .= "'DROP INDEX \"Report_Idx\"; CREATE INDEX \"Report_Idx\" ON $history USING btree (\"timestamp\", reading)'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - } - } + $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$irep_rdg); + $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$irep_tsp); + $rec = "The index should contain the fields 'TIMESTAMP', 'READING'. "; + $rec .= "You can change the index by executing e.g.
"; + $rec .= "'DROP INDEX \"Report_Idx\"; CREATE INDEX \"Report_Idx\" ON $history USING btree (\"timestamp\", reading)'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + } + } } if($dbmodel =~ /SQLITE/) { @dix = DbLog_sqlget($hash,"SELECT name,sql FROM sqlite_master WHERE type='index' AND name='Report_Idx'"); - if (!$dix[0]) { - $check .= "The index 'Report_Idx' is missing.
"; - $rec = "You can create the index by executing statement 'CREATE INDEX Report_Idx ON `$history` (TIMESTAMP,READING)'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; + if (!$dix[0]) { + $check .= "The index 'Report_Idx' is missing.
"; + $rec = "You can create the index by executing statement 'CREATE INDEX Report_Idx ON `$history` (TIMESTAMP,READING)'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + $rec .= "Please make sure the device '$name' is operating in asynchronous mode to avoid FHEM from blocking when creating the index.
"; $rec .= "Note: If you have just created another index which covers the same fields and order as suggested (e.g. a primary key) you don't need to create the 'Search_Idx' as well !
"; - } else { + } else { $irep = $dix[1]; - $irep_rdg = 1 if(lc($irep) =~ /reading/); - $irep_tsp = 1 if(lc($irep) =~ /timestamp/); + $irep_rdg = 1 if(lc($irep) =~ /reading/); + $irep_tsp = 1 if(lc($irep) =~ /timestamp/); if ($irep_rdg && $irep_tsp) { $check .= "Index 'Report_Idx' exists and contains recommended fields 'TIMESTAMP', 'READING'.
"; $rec = "settings o.k."; } else { - $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$irep_rdg); - $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$irep_tsp); - $rec = "The index should contain the fields 'TIMESTAMP', 'READING'. "; - $rec .= "You can change the index by executing e.g.
"; - $rec .= "'DROP INDEX \"Report_Idx\"; CREATE INDEX Report_Idx ON `$history` (TIMESTAMP,READING)'
"; - $rec .= "Depending on your database size this command may running a long time.
"; - } - } + $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'READING'.
" if (!$irep_rdg); + $check .= "Index 'Report_Idx' exists but doesn't contain recommended field 'TIMESTAMP'.
" if (!$irep_tsp); + $rec = "The index should contain the fields 'TIMESTAMP', 'READING'. "; + $rec .= "You can change the index by executing e.g.
"; + $rec .= "'DROP INDEX \"Report_Idx\"; CREATE INDEX Report_Idx ON `$history` (TIMESTAMP,READING)'
"; + $rec .= "Depending on your database size this command may running a long time.
"; + } + } } } else { $check .= "No DbRep-device assigned to $name is used. Hence an index for DbRep isn't needed.
"; @@ -4401,7 +4460,7 @@ sub DbLog_sqlget($$) { eval { $sth = $dbh->prepare("$sql"); $sth->execute; - }; + }; if($@) { $dbh->disconnect if($dbh); Log3 ($name, 2, "DbLog $name - $@"); @@ -4456,40 +4515,40 @@ sub DbLog_AddLog($$$$$) { $dev_name = $_; if(!$defs{$dev_name}) { Log3 $name, 2, "DbLog $name -> Device '$dev_name' used by addLog doesn't exist !"; - next; + next; } - - my $r = $defs{$dev_name}{READINGS}; + + my $r = $defs{$dev_name}{READINGS}; my $DbLogExclude = AttrVal($dev_name, "DbLogExclude", undef); - my $DbLogInclude = AttrVal($dev_name, "DbLogInclude", undef); - my @exrds; + my $DbLogInclude = AttrVal($dev_name, "DbLogInclude", undef); + my @exrds; my $found = 0; - foreach my $rd (sort keys %{$r}) { # jedes Reading des Devices auswerten + foreach my $rd (sort keys %{$r}) { # jedes Reading des Devices auswerten my $do = 1; $found = 1 if($rd =~ m/^$rdspec$/); # Reading gefunden - if($DbLogExclude && !$nce) { + if($DbLogExclude && !$nce) { my @v1 = split(/,/, $DbLogExclude); for (my $i=0; $i ausschließen vom addLog - $do = 0; - if($DbLogInclude) { - my @v3 = split(/,/, $DbLogInclude); - for (my $i=0; $i wieder in addLog einschließen - } - } - Log3 $name, 2, "DbLog $name -> Device: \"$dev_name\", reading: \"$v2[0]\" excluded by attribute DbLogExclude from addLog !" if($do == 0 && $rd =~ m/^$rdspec$/); - } + $do = 0; + if($DbLogInclude) { + my @v3 = split(/,/, $DbLogInclude); + for (my $i=0; $i wieder in addLog einschließen + } + } + Log3 $name, 2, "DbLog $name -> Device: \"$dev_name\", reading: \"$v2[0]\" excluded by attribute DbLogExclude from addLog !" 
if($do == 0 && $rd =~ m/^$rdspec$/); + } } } next if(!$do); - push @exrds,$rd if($rd =~ m/^$rdspec$/); - } - Log3 $name, 4, "DbLog $name -> Readings extracted from Regex: @exrds"; + push @exrds,$rd if($rd =~ m/^$rdspec$/); + } + Log3 $name, 4, "DbLog $name -> Readings extracted from Regex: @exrds"; - if(!$found) { + if(!$found) { if(goodReadingName($rdspec) && defined($value)) { Log3 $name, 3, "DbLog $name -> addLog WARNING - Device: '$dev_name' -> Reading '$rdspec' not found - add it as new reading."; push @exrds,$rdspec; @@ -4501,15 +4560,15 @@ sub DbLog_AddLog($$$$$) { } no warnings 'uninitialized'; - foreach (@exrds) { - $dev_reading = $_; + foreach (@exrds) { + $dev_reading = $_; $read_val = $value ne ""?$value:ReadingsVal($dev_name,$dev_reading,""); - $dev_type = uc($defs{$dev_name}{TYPE}); + $dev_type = uc($defs{$dev_name}{TYPE}); # dummy-Event zusammenstellen - $event = $dev_reading.": ".$read_val; + $event = $dev_reading.": ".$read_val; - # den zusammengestellten Event parsen lassen (evtl. Unit zuweisen) + # den zusammengestellten Event parsen lassen (evtl. 
Unit zuweisen) my @r = DbLog_ParseEvent($name,$dev_name, $dev_type, $event); $dev_reading = $r[0]; $read_val = $r[1]; @@ -4518,25 +4577,36 @@ sub DbLog_AddLog($$$$$) { if(!defined $read_val) {$read_val = "";} if(!defined $ut || $ut eq "") {$ut = AttrVal("$dev_name", "unit", "");} $event = "addLog"; - - $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{TIME} = $now; + + $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{TIME} = $now; $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{VALUE} = $read_val; $ts = TimeNow(); - # Anwender spezifische Funktion anwenden + + # Anwender spezifische Funktion anwenden if($value_fn ne '') { - my $TIMESTAMP = $ts; - my $DEVICE = $dev_name; - my $DEVICETYPE = $dev_type; - my $EVENT = $event; - my $READING = $dev_reading; - my $VALUE = $read_val; - my $UNIT = $ut; - my $IGNORE = 0; - my $CN = $cn?$cn:""; + my $lastt = $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{TIME}; # patch Forum:#111423 + my $lastv = $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{VALUE}; + + my $TIMESTAMP = $ts; + my $LASTTIMESTAMP = $lastt // 0; # patch Forum:#111423 + my $DEVICE = $dev_name; + my $DEVICETYPE = $dev_type; + my $EVENT = $event; + my $READING = $dev_reading; + my $VALUE = $read_val; + my $LASTVALUE = $lastv // ""; # patch Forum:#111423 + my $UNIT = $ut; + my $IGNORE = 0; + my $CN = $cn?$cn:""; - eval $value_fn; - Log3 $name, 2, "DbLog $name -> error valueFn: ".$@ if($@); - next if($IGNORE); # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt in $value_fn + eval $value_fn; + Log3 $name, 2, "DbLog $name -> error valueFn: ".$@ if($@); + + if($IGNORE) { # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt + $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{TIME} = $lastt if($lastt); # patch Forum:#111423 + $defs{$dev_name}{Helper}{DBLOG}{$dev_reading}{$hash->{NAME}}{VALUE} = $lastv if(defined $lastv); + next; + } my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($TIMESTAMP 
=~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); eval { my $epoch_seconds_begin = timelocal($sec, $min, $hh, $dd, $mm-1, $yyyy-1900); }; @@ -4545,13 +4615,13 @@ sub DbLog_AddLog($$$$$) { } else { Log3 ($name, 2, "DbLog $name -> Parameter TIMESTAMP got from valueFn is invalid: $TIMESTAMP"); } - $dev_name = $DEVICE if($DEVICE ne ''); - $dev_type = $DEVICETYPE if($DEVICETYPE ne ''); - $dev_reading = $READING if($READING ne ''); - $read_val = $VALUE if(defined $VALUE); - $ut = $UNIT if(defined $UNIT); + $dev_name = $DEVICE if($DEVICE ne ''); + $dev_type = $DEVICETYPE if($DEVICETYPE ne ''); + $dev_reading = $READING if($READING ne ''); + $read_val = $VALUE if(defined $VALUE); + $ut = $UNIT if(defined $UNIT); } - + # Daten auf maximale Länge beschneiden ($dev_name,$dev_type,$event,$dev_reading,$read_val,$ut) = DbLog_cutCol($hash,$dev_name,$dev_type,$event,$dev_reading,$read_val,$ut); @@ -4562,39 +4632,39 @@ sub DbLog_AddLog($$$$$) { my $row = ($ts."|".$dev_name."|".$dev_type."|".$event."|".$dev_reading."|".$read_val."|".$ut); Log3 $hash->{NAME}, 3, "DbLog $name -> addLog created - TS: $ts, Device: $dev_name, Type: $dev_type, Event: $event, Reading: $dev_reading, Value: $read_val, Unit: $ut" - if(!AttrVal($name, "suppressAddLogV3",0)); + if(!AttrVal($name, "suppressAddLogV3",0)); if($async) { # asynchoner non-blocking Mode - # Cache & CacheIndex für Events zum asynchronen Schreiben in DB - $data{DbLog}{$name}{cache}{index}++; - my $index = $data{DbLog}{$name}{cache}{index}; - $data{DbLog}{$name}{cache}{memcache}{$index} = $row; - my $memcount = $data{DbLog}{$name}{cache}{memcache}?scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}):0; - if($ce == 1) { + # Cache & CacheIndex für Events zum asynchronen Schreiben in DB + $data{DbLog}{$name}{cache}{index}++; + my $index = $data{DbLog}{$name}{cache}{index}; + $data{DbLog}{$name}{cache}{memcache}{$index} = $row; + my $memcount = $data{DbLog}{$name}{cache}{memcache}?scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}):0; + if($ce == 
1) { readingsSingleUpdate($hash, "CacheUsage", $memcount, 1); - } else { - readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); - } + } else { + readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); + } } else { - # synchoner Mode - push(@row_array, $row); + # synchoner Mode + push(@row_array, $row); } - } + } use warnings; } if(!$async) { if(@row_array) { - # synchoner Mode - # return wenn "reopen" mit Ablaufzeit gestartet ist - return if($hash->{HELPER}{REOPEN_RUNS}); + # synchoner Mode + # return wenn "reopen" mit Ablaufzeit gestartet ist + return if($hash->{HELPER}{REOPEN_RUNS}); my $error = DbLog_Push($hash, 1, @row_array); my $state = $error?$error:(IsDisabled($name))?"disabled":"connected"; my $evt = ($state eq $hash->{HELPER}{OLDSTATE})?0:1; readingsSingleUpdate($hash, "state", $state, $evt); $hash->{HELPER}{OLDSTATE} = $state; - + Log3 $name, 5, "DbLog $name -> DbLog_Push Returncode: $error"; } } @@ -4618,24 +4688,36 @@ sub DbLog_addCacheLine($$$$$$$$) { } else { $value_fn = ''; } + if($value_fn ne '') { - my $TIMESTAMP = $i_timestamp; - my $DEVICE = $i_dev; - my $DEVICETYPE = $i_type; - my $EVENT = $i_evt; - my $READING = $i_reading; - my $VALUE = $i_val; - my $UNIT = $i_unit; - my $IGNORE = 0; - my $CN = " "; + my $lastt; + my $lastv; + if($defs{$i_dev}) { + $lastt = $defs{$i_dev}{Helper}{DBLOG}{$i_reading}{$hash->{NAME}}{TIME}; + $lastv = $defs{$i_dev}{Helper}{DBLOG}{$i_reading}{$hash->{NAME}}{VALUE}; + } + + my $TIMESTAMP = $i_timestamp; + my $LASTTIMESTAMP = $lastt // 0; # patch Forum:#111423 + my $DEVICE = $i_dev; + my $DEVICETYPE = $i_type; + my $EVENT = $i_evt; + my $READING = $i_reading; + my $VALUE = $i_val; + my $LASTVALUE = $lastv // ""; # patch Forum:#111423 + my $UNIT = $i_unit; + my $IGNORE = 0; + my $CN = " "; - eval $value_fn; - Log3 $name, 2, "DbLog $name -> error valueFn: ".$@ if($@); - if($IGNORE) { - # aktueller Event wird nicht geloggt wenn $IGNORE=1 gesetzt in $value_fn - Log3 $hash->{NAME}, 4, "DbLog $name -> Event ignored by 
valueFn - TS: $i_timestamp, Device: $i_dev, Type: $i_type, Event: $i_evt, Reading: $i_reading, Value: $i_val, Unit: $i_unit"; - next; - } + eval $value_fn; + Log3 $name, 2, "DbLog $name -> error valueFn: ".$@ if($@); + + if($IGNORE) { # kein add wenn $IGNORE=1 gesetzt + $defs{$i_dev}{Helper}{DBLOG}{$i_reading}{$hash->{NAME}}{TIME} = $lastt if($defs{$i_dev} && $lastt); # patch Forum:#111423 + $defs{$i_dev}{Helper}{DBLOG}{$i_reading}{$hash->{NAME}}{VALUE} = $lastv if($defs{$i_dev} && defined $lastv); + Log3 $hash->{NAME}, 4, "DbLog $name -> Event ignored by valueFn - TS: $i_timestamp, Device: $i_dev, Type: $i_type, Event: $i_evt, Reading: $i_reading, Value: $i_val, Unit: $i_unit"; + next; + } my ($yyyy, $mm, $dd, $hh, $min, $sec) = ($TIMESTAMP =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/); eval { my $epoch_seconds_begin = timelocal($sec, $min, $hh, $dd, $mm-1, $yyyy-1900); }; @@ -4645,13 +4727,13 @@ sub DbLog_addCacheLine($$$$$$$$) { Log3 ($name, 2, "DbLog $name -> Parameter TIMESTAMP got from valueFn is invalid: $TIMESTAMP"); } - $i_dev = $DEVICE if($DEVICE ne ''); - $i_type = $DEVICETYPE if($DEVICETYPE ne ''); - $i_reading = $READING if($READING ne ''); - $i_val = $VALUE if(defined $VALUE); - $i_unit = $UNIT if(defined $UNIT); + $i_dev = $DEVICE if($DEVICE ne ''); + $i_type = $DEVICETYPE if($DEVICETYPE ne ''); + $i_reading = $READING if($READING ne ''); + $i_val = $VALUE if(defined $VALUE); + $i_unit = $UNIT if(defined $UNIT); } - + no warnings 'uninitialized'; # Daten auf maximale Länge beschneiden ($i_dev,$i_type,$i_evt,$i_reading,$i_val,$i_unit) = DbLog_cutCol($hash,$i_dev,$i_type,$i_evt,$i_reading,$i_val,$i_unit); @@ -4663,21 +4745,20 @@ sub DbLog_addCacheLine($$$$$$$$) { eval { # one transaction $data{DbLog}{$name}{cache}{index}++; - my $index = $data{DbLog}{$name}{cache}{index}; - $data{DbLog}{$name}{cache}{memcache}{$index} = $row; - + my $index = $data{DbLog}{$name}{cache}{index}; + $data{DbLog}{$name}{cache}{memcache}{$index} = $row; + my $memcount = 
$data{DbLog}{$name}{cache}{memcache}?scalar(keys %{$data{DbLog}{$name}{cache}{memcache}}):0; - if($ce == 1) { + if($ce == 1) { readingsSingleUpdate($hash, "CacheUsage", $memcount, 1); - } else { - readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); - } + } else { + readingsSingleUpdate($hash, 'CacheUsage', $memcount, 0); + } }; return; } - ######################################################################################### # # Subroutine cutCol - Daten auf maximale Länge beschneiden @@ -4777,7 +4858,7 @@ sub DbLog_reduceLog($@) { Log3($name, 3, "DbLog $name: reduceLog requested with DAYS=$a[2]" .(($average || $filter) ? ', ' : '').(($average) ? "$average" : '') .(($average && $filter) ? ", " : '').(($filter) ? uc((split('=',$a[-1]))[0]).'='.(split('=',$a[-1]))[1] : '')); - + my ($useac,$useta) = DbLog_commitMode($hash); my $ac = ($dbh->{AutoCommit})?"ON":"OFF"; my $tm = ($useta)?"ON":"OFF"; @@ -4799,7 +4880,7 @@ sub DbLog_reduceLog($@) { } if ($ots) { - my ($sth_del, $sth_upd, $sth_delD, $sth_updD, $sth_get); + my ($sth_del, $sth_upd, $sth_delD, $sth_updD, $sth_get); eval { $sth_del = $dbh->prepare_cached("DELETE FROM $history WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) AND (VALUE=?)"); $sth_upd = $dbh->prepare_cached("UPDATE $history SET TIMESTAMP=?, EVENT=?, VALUE=? WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) AND (VALUE=?)"); $sth_delD = $dbh->prepare_cached("DELETE FROM $history WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?)"); @@ -4807,7 +4888,7 @@ sub DbLog_reduceLog($@) { $sth_get = $dbh->prepare("SELECT TIMESTAMP,DEVICE,'',READING,VALUE FROM $history WHERE " .($a[-1] =~ /^INCLUDE=(.+):(.+)$/i ? "DEVICE like '$1' AND READING like '$2' AND " : '') ."TIMESTAMP < $ots".($nts?" 
AND TIMESTAMP >= $nts ":" ")."ORDER BY TIMESTAMP ASC"); # '' was EVENT, no longer in use - }; + }; $sth_get->execute(); @@ -4831,20 +4912,20 @@ sub DbLog_reduceLog($@) { $dbh->{PrintError} = 0; eval {$dbh->begin_work() if($dbh->{AutoCommit});}; eval { - my $i = 0; - my $k = 1; - my $th = ($#dayRows <= 2000)?100:($#dayRows <= 30000)?1000:10000; + my $i = 0; + my $k = 1; + my $th = ($#dayRows <= 2000)?100:($#dayRows <= 30000)?1000:10000; for my $delRow (@dayRows) { if($day != 00 || $delRow->[0] !~ /$lastHour/) { Log3($name, 5, "DbLog $name: DELETE FROM $history WHERE (DEVICE=$delRow->[1]) AND (READING=$delRow->[3]) AND (TIMESTAMP=$delRow->[0]) AND (VALUE=$delRow->[4])"); $sth_del->execute(($delRow->[1], $delRow->[3], $delRow->[0], $delRow->[4])); - $i++; - if($i == $th) { - my $prog = $k * $i; - Log3($name, 3, "DbLog $name: reduceLog deletion progress of day: $processingDay is: $prog"); - $i = 0; - $k++; - } + $i++; + if($i == $th) { + my $prog = $k * $i; + Log3($name, 3, "DbLog $name: reduceLog deletion progress of day: $processingDay is: $prog"); + $i = 0; + $k++; + } } } }; @@ -4877,9 +4958,9 @@ sub DbLog_reduceLog($@) { $updateCount += $c; Log3($name, 3, "DbLog $name: reduceLog (hourly-average) updating $c records of day: $processingDay") if($c); # else only push to @averageUpdD - my $i = 0; - my $k = 1; - my $th = ($c <= 2000)?100:($c <= 30000)?1000:10000; + my $i = 0; + my $k = 1; + my $th = ($c <= 2000)?100:($c <= 30000)?1000:10000; for my $hourHash (@averageUpd) { for my $hourKey (keys %$hourHash) { if ($hourHash->{$hourKey}->[0]) { # true if reading is a number @@ -4891,14 +4972,14 @@ sub DbLog_reduceLog($@) { Log3($name, 5, "DbLog $name: UPDATE $history SET TIMESTAMP=$updDate $updHour:30:00, EVENT='rl_av_h', VALUE=$average WHERE DEVICE=$hourHash->{$hourKey}->[1] AND READING=$hourHash->{$hourKey}->[3] AND TIMESTAMP=$hourHash->{$hourKey}->[0] AND VALUE=$hourHash->{$hourKey}->[4]->[0]"); $sth_upd->execute(("$updDate $updHour:30:00", 'rl_av_h', $average, 
$hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[4]->[0])); - $i++; - if($i == $th) { - my $prog = $k * $i; - Log3($name, 3, "DbLog $name: reduceLog (hourly-average) updating progress of day: $processingDay is: $prog"); - $i = 0; - $k++; - } - push(@averageUpdD, ["$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); + $i++; + if($i == $th) { + my $prog = $k * $i; + Log3($name, 3, "DbLog $name: reduceLog (hourly-average) updating progress of day: $processingDay is: $prog"); + $i = 0; + $k++; + } + push(@averageUpdD, ["$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); } else { push(@averageUpdD, [$hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[2], $hourHash->{$hourKey}->[4]->[0], $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); } @@ -4940,11 +5021,11 @@ sub DbLog_reduceLog($@) { } $deletedCount += $c; $updateCount += keys(%averageHash); - - my ($id,$iu) = 0; - my ($kd,$ku) = 1; - my $thd = ($c <= 2000)?100:($c <= 30000)?1000:10000; - my $thu = ((keys %averageHash) <= 2000)?100:((keys %averageHash) <= 30000)?1000:10000; + + my ($id,$iu) = 0; + my ($kd,$ku) = 1; + my $thd = ($c <= 2000)?100:($c <= 30000)?1000:10000; + my $thu = ((keys %averageHash) <= 2000)?100:((keys %averageHash) <= 30000)?1000:10000; Log3($name, 3, "DbLog $name: reduceLog (daily-average) updating ".(keys %averageHash).", deleting $c records of day: $processingDay") if(keys %averageHash); for my $reading (keys %averageHash) { $average = sprintf('%.3f', $averageHash{$reading}->{sum}/scalar(@{$averageHash{$reading}->{tedr}})); @@ -4952,26 +5033,26 @@ sub DbLog_reduceLog($@) { for (@{$averageHash{$reading}->{tedr}}) { Log3($name, 5, "DbLog $name: DELETE FROM 
$history WHERE DEVICE='$_->[2]' AND READING='$_->[3]' AND TIMESTAMP='$_->[0]'"); $sth_delD->execute(($_->[2], $_->[3], $_->[0])); - - $id++; - if($id == $thd) { - my $prog = $kd * $id; - Log3($name, 3, "DbLog $name: reduceLog (daily-average) deleting progress of day: $processingDay is: $prog"); - $id = 0; - $kd++; - } + + $id++; + if($id == $thd) { + my $prog = $kd * $id; + Log3($name, 3, "DbLog $name: reduceLog (daily-average) deleting progress of day: $processingDay is: $prog"); + $id = 0; + $kd++; + } } Log3($name, 5, "DbLog $name: UPDATE $history SET TIMESTAMP=$averageHash{$reading}->{date} 12:00:00, EVENT='rl_av_d', VALUE=$average WHERE (DEVICE=$lastUpdH->[2]) AND (READING=$lastUpdH->[3]) AND (TIMESTAMP=$lastUpdH->[0])"); $sth_updD->execute(($averageHash{$reading}->{date}." 12:00:00", 'rl_av_d', $average, $lastUpdH->[2], $lastUpdH->[3], $lastUpdH->[0])); - $iu++; - if($iu == $thu) { - my $prog = $ku * $id; - Log3($name, 3, "DbLog $name: reduceLog (daily-average) updating progress of day: $processingDay is: $prog"); - $iu = 0; - $ku++; - } - } + $iu++; + if($iu == $thu) { + my $prog = $ku * $id; + Log3($name, 3, "DbLog $name: reduceLog (daily-average) updating progress of day: $processingDay is: $prog"); + $iu = 0; + $ku++; + } + } }; if ($@) { $err = $@; @@ -5025,7 +5106,7 @@ sub DbLog_reduceLog($@) { .(($excludeCount)? ", excluded: $excludeCount" : '') .", time: ".sprintf('%.2f',time() - $startTime)."sec"; Log3($name, 3, "DbLog $name: reduceLog executed. $result"); - readingsSingleUpdate($hash,"reduceLogState",$result,1); + readingsSingleUpdate($hash,"reduceLogState",$result,1); $ret = "reduceLog executed. 
$result"; } $dbh->disconnect(); @@ -5042,24 +5123,24 @@ sub DbLog_reduceLogNbl($) { my $dbuser = $hash->{dbuser}; my $dbpassword = $attr{"sec$name"}{secret}; my @a = @{$hash->{HELPER}{REDUCELOG}}; - my $utf8 = defined($hash->{UTF8})?$hash->{UTF8}:0; + my $utf8 = defined($hash->{UTF8})?$hash->{UTF8}:0; my $history = $hash->{HELPER}{TH}; my $current = $hash->{HELPER}{TC}; delete $hash->{HELPER}{REDUCELOG}; my ($ret,$row,$filter,$exclude,$c,$day,$hour,$lastHour,$updDate,$updHour,$average,$processingDay,$lastUpdH,%hourlyKnown,%averageHash,@excludeRegex,@dayRows,@averageUpd,@averageUpdD); my ($startTime,$currentHour,$currentDay,$deletedCount,$updateCount,$sum,$rowCount,$excludeCount) = (time(),99,0,0,0,0,0,0); my ($dbh,$err); - - Log3 ($name, 5, "DbLog $name -> Start DbLog_reduceLogNbl"); - + + Log3 ($name, 5, "DbLog $name -> Start DbLog_reduceLogNbl"); + my ($useac,$useta) = DbLog_commitMode($hash); if(!$useac) { - eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0 });}; + eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, AutoCommit => 0 });}; } elsif($useac == 1) { - eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1 });}; + eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, AutoCommit => 1 });}; } else { # Server default - eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1 });}; + eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1 });}; } if ($@) { $err = encode_base64($@,""); @@ -5102,7 +5183,7 @@ sub DbLog_reduceLogNbl($) { } if ($ots) { - my ($sth_del, $sth_upd, $sth_delD, $sth_updD, $sth_get); + my ($sth_del, $sth_upd, $sth_delD, $sth_updD, $sth_get); eval { $sth_del = 
$dbh->prepare_cached("DELETE FROM $history WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) AND (VALUE=?)"); $sth_upd = $dbh->prepare_cached("UPDATE $history SET TIMESTAMP=?, EVENT=?, VALUE=? WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) AND (VALUE=?)"); $sth_delD = $dbh->prepare_cached("DELETE FROM $history WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?)"); @@ -5110,15 +5191,15 @@ sub DbLog_reduceLogNbl($) { $sth_get = $dbh->prepare("SELECT TIMESTAMP,DEVICE,'',READING,VALUE FROM $history WHERE " .($a[-1] =~ /^INCLUDE=(.+):(.+)$/i ? "DEVICE like '$1' AND READING like '$2' AND " : '') ."TIMESTAMP < $ots".($nts?" AND TIMESTAMP >= $nts ":" ")."ORDER BY TIMESTAMP ASC"); # '' was EVENT, no longer in use - }; + }; if ($@) { $err = encode_base64($@,""); Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); Log3 ($name, 5, "DbLog $name -> DbLog_reduceLogNbl finished"); return "$name|''|$err"; } - - eval { $sth_get->execute(); }; + + eval { $sth_get->execute(); }; if ($@) { $err = encode_base64($@,""); Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); @@ -5145,24 +5226,24 @@ sub DbLog_reduceLogNbl($) { $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; eval {$dbh->begin_work() if($dbh->{AutoCommit});}; - if ($@) { + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } eval { - my $i = 0; - my $k = 1; - my $th = ($#dayRows <= 2000)?100:($#dayRows <= 30000)?1000:10000; + my $i = 0; + my $k = 1; + my $th = ($#dayRows <= 2000)?100:($#dayRows <= 30000)?1000:10000; for my $delRow (@dayRows) { if($day != 00 || $delRow->[0] !~ /$lastHour/) { Log3($name, 4, "DbLog $name: DELETE FROM $history WHERE (DEVICE=$delRow->[1]) AND (READING=$delRow->[3]) AND (TIMESTAMP=$delRow->[0]) AND (VALUE=$delRow->[4])"); $sth_del->execute(($delRow->[1], $delRow->[3], $delRow->[0], $delRow->[4])); - $i++; - if($i == $th) { - my $prog = $k * $i; - Log3($name, 3, "DbLog $name: reduceLogNbl deletion progress of day: $processingDay is: $prog"); - $i = 0; - $k++; - } + 
$i++; + if($i == $th) { + my $prog = $k * $i; + Log3($name, 3, "DbLog $name: reduceLogNbl deletion progress of day: $processingDay is: $prog"); + $i = 0; + $k++; + } } } }; @@ -5170,13 +5251,13 @@ sub DbLog_reduceLogNbl($) { $err = $@; Log3($hash->{NAME}, 2, "DbLog $name - reduceLogNbl ! FAILED ! for day $processingDay: $err"); eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; - if ($@) { + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } $ret = 0; } else { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; - if ($@) { + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } } @@ -5190,7 +5271,7 @@ sub DbLog_reduceLogNbl($) { $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; eval {$dbh->begin_work() if($dbh->{AutoCommit});}; - if ($@) { + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } eval { @@ -5205,9 +5286,9 @@ sub DbLog_reduceLogNbl($) { $updateCount += $c; Log3($name, 3, "DbLog $name: reduceLogNbl (hourly-average) updating $c records of day: $processingDay") if($c); # else only push to @averageUpdD - my $i = 0; - my $k = 1; - my $th = ($c <= 2000)?100:($c <= 30000)?1000:10000; + my $i = 0; + my $k = 1; + my $th = ($c <= 2000)?100:($c <= 30000)?1000:10000; for my $hourHash (@averageUpd) { for my $hourKey (keys %$hourHash) { if ($hourHash->{$hourKey}->[0]) { # true if reading is a number @@ -5218,35 +5299,35 @@ sub DbLog_reduceLogNbl($) { $sum = 0; Log3($name, 4, "DbLog $name: UPDATE $history SET TIMESTAMP=$updDate $updHour:30:00, EVENT='rl_av_h', VALUE=$average WHERE DEVICE=$hourHash->{$hourKey}->[1] AND READING=$hourHash->{$hourKey}->[3] AND TIMESTAMP=$hourHash->{$hourKey}->[0] AND VALUE=$hourHash->{$hourKey}->[4]->[0]"); $sth_upd->execute(("$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[4]->[0])); - - $i++; - if($i == $th) { - my $prog = $k * $i; - Log3($name, 3, "DbLog $name: reduceLogNbl 
(hourly-average) updating progress of day: $processingDay is: $prog"); - $i = 0; - $k++; - } + + $i++; + if($i == $th) { + my $prog = $k * $i; + Log3($name, 3, "DbLog $name: reduceLogNbl (hourly-average) updating progress of day: $processingDay is: $prog"); + $i = 0; + $k++; + } push(@averageUpdD, ["$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); } else { push(@averageUpdD, [$hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[2], $hourHash->{$hourKey}->[4]->[0], $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); } - } - } + } + } } }; if ($@) { $err = $@; Log3($hash->{NAME}, 2, "DbLog $name - reduceLogNbl average=hour ! FAILED ! for day $processingDay: $err"); eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; - if ($@) { + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } @averageUpdD = (); } else { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; - if ($@) { + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); - } + } } $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; @@ -5257,7 +5338,7 @@ sub DbLog_reduceLogNbl($) { $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; eval {$dbh->begin_work() if($dbh->{AutoCommit});}; - if ($@) { + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } eval { @@ -5278,10 +5359,10 @@ sub DbLog_reduceLogNbl($) { $deletedCount += $c; $updateCount += keys(%averageHash); - my ($id,$iu) = 0; - my ($kd,$ku) = 1; - my $thd = ($c <= 2000)?100:($c <= 30000)?1000:10000; - my $thu = ((keys %averageHash) <= 2000)?100:((keys %averageHash) <= 30000)?1000:10000; + my ($id,$iu) = 0; + my ($kd,$ku) = 1; + my $thd = ($c <= 2000)?100:($c <= 30000)?1000:10000; + my $thu = ((keys %averageHash) <= 2000)?100:((keys %averageHash) <= 30000)?1000:10000; Log3($name, 3, "DbLog $name: reduceLogNbl (daily-average) updating ".(keys 
%averageHash).", deleting $c records of day: $processingDay") if(keys %averageHash); for my $reading (keys %averageHash) { $average = sprintf('%.3f', $averageHash{$reading}->{sum}/scalar(@{$averageHash{$reading}->{tedr}})); @@ -5289,36 +5370,36 @@ sub DbLog_reduceLogNbl($) { for (@{$averageHash{$reading}->{tedr}}) { Log3($name, 5, "DbLog $name: DELETE FROM $history WHERE DEVICE='$_->[2]' AND READING='$_->[3]' AND TIMESTAMP='$_->[0]'"); $sth_delD->execute(($_->[2], $_->[3], $_->[0])); - - $id++; - if($id == $thd) { - my $prog = $kd * $id; - Log3($name, 3, "DbLog $name: reduceLogNbl (daily-average) deleting progress of day: $processingDay is: $prog"); - $id = 0; - $kd++; - } + + $id++; + if($id == $thd) { + my $prog = $kd * $id; + Log3($name, 3, "DbLog $name: reduceLogNbl (daily-average) deleting progress of day: $processingDay is: $prog"); + $id = 0; + $kd++; + } } Log3($name, 4, "DbLog $name: UPDATE $history SET TIMESTAMP=$averageHash{$reading}->{date} 12:00:00, EVENT='rl_av_d', VALUE=$average WHERE (DEVICE=$lastUpdH->[2]) AND (READING=$lastUpdH->[3]) AND (TIMESTAMP=$lastUpdH->[0])"); $sth_updD->execute(($averageHash{$reading}->{date}." 12:00:00", 'rl_av_d', $average, $lastUpdH->[2], $lastUpdH->[3], $lastUpdH->[0])); - - $iu++; - if($iu == $thu) { - my $prog = $ku * $id; - Log3($name, 3, "DbLog $name: reduceLogNbl (daily-average) updating progress of day: $processingDay is: $prog"); - $iu = 0; - $ku++; - } - } + + $iu++; + if($iu == $thu) { + my $prog = $ku * $id; + Log3($name, 3, "DbLog $name: reduceLogNbl (daily-average) updating progress of day: $processingDay is: $prog"); + $iu = 0; + $ku++; + } + } }; if ($@) { Log3($hash->{NAME}, 3, "DbLog $name: reduceLogNbl average=day ! FAILED ! 
for day $processingDay"); eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; - if ($@) { + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } } else { eval {$dbh->commit() if(!$dbh->{AutoCommit});}; - if ($@) { + if ($@) { Log3 ($name, 2, "DbLog $name -> DbLog_reduceLogNbl - $@"); } } @@ -5371,10 +5452,10 @@ sub DbLog_reduceLogNbl($) { $ret = "reduceLogNbl finished. $result"; } - $dbh->disconnect(); + $dbh->disconnect(); $ret = encode_base64($ret,""); - Log3 ($name, 5, "DbLog $name -> DbLog_reduceLogNbl finished"); - + Log3 ($name, 5, "DbLog $name -> DbLog_reduceLogNbl finished"); + return "$name|$ret|0"; } @@ -5413,12 +5494,12 @@ sub DbLog_countNbl($) { return "$name|0|0|$err|0"; } else { Log3 $name,4,"DbLog $name: Records count requested."; - # SQL-Startzeit + # SQL-Startzeit $st = [gettimeofday]; $hc = $dbh->selectrow_array("SELECT count(*) FROM $history"); $cc = $dbh->selectrow_array("SELECT count(*) FROM $current"); $dbh->disconnect(); - # SQL-Laufzeit ermitteln + # SQL-Laufzeit ermitteln $rt = tv_interval($st); } @@ -5478,12 +5559,12 @@ sub DbLog_deldaysNbl($) { my ($useac,$useta) = DbLog_commitMode($hash); if(!$useac) { - eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0 });}; + eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 0, AutoInactiveDestroy => 1 });}; } elsif($useac == 1) { - eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1 });}; + eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, AutoInactiveDestroy => 1 });}; } else { # Server default - eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1 });}; + eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1 });}; } if 
($@) { $error = encode_base64($@,""); @@ -5504,9 +5585,9 @@ sub DbLog_deldaysNbl($) { } elsif ($hash->{MODEL} eq 'POSTGRESQL') { $cmd .= "NOW() - INTERVAL '$days' DAY"; } else { - $ret = 'Unknown database type. Maybe you can try userCommand anyway.'; - $error = encode_base64($ret,""); - Log3 ($name, 2, "DbLog $name - Error: $ret"); + $ret = 'Unknown database type. Maybe you can try userCommand anyway.'; + $error = encode_base64($ret,""); + Log3 ($name, 2, "DbLog $name - Error: $ret"); Log3 ($name, 5, "DbLog $name -> DbLog_deldaysNbl finished"); return "$name|0|0|$error"; } @@ -5558,7 +5639,7 @@ sub DbLog_deldaysNbl_done($) { if ($err) { readingsSingleUpdate($hash,"state",$err,1); - delete $hash->{HELPER}{DELDAYS_PID}; + delete $hash->{HELPER}{DELDAYS_PID}; Log3 ($name, 5, "DbLog $name -> DbLog_deldaysNbl_done finished"); return; } else { @@ -5568,8 +5649,8 @@ sub DbLog_deldaysNbl_done($) { readingsBulkUpdate($hash, "background_processing_time", sprintf("%.4f",$brt)); readingsBulkUpdate($hash, "sql_processing_time", sprintf("%.4f",$rt)); readingsEndUpdate($hash, 1); - } - readingsSingleUpdate($hash, "lastRowsDeleted", $rows ,1); + } + readingsSingleUpdate($hash, "lastRowsDeleted", $rows ,1); } my $db = (split(/;|=/, $hash->{dbconn}))[1]; Log3 ($name, 3, "DbLog $name -> deleteOldDaysNbl finished. 
$rows entries of database $db deleted."); @@ -5614,12 +5695,12 @@ sub DbLog_reopen ($){ # Statusbit "Kein Schreiben in DB erlauben" löschen my $delay = delete $hash->{HELPER}{REOPEN_RUNS}; delete $hash->{HELPER}{REOPEN_RUNS_UNTIL}; - Log3($name, 2, "DbLog $name: Database connection reopened (it was $delay seconds closed).") if($delay); - readingsSingleUpdate($hash, "state", "reopened", 1); - $hash->{HELPER}{OLDSTATE} = "reopened"; - DbLog_execmemcache($hash) if($async); + Log3($name, 2, "DbLog $name: Database connection reopened (it was $delay seconds closed).") if($delay); + readingsSingleUpdate($hash, "state", "reopened", 1); + $hash->{HELPER}{OLDSTATE} = "reopened"; + DbLog_execmemcache($hash) if($async); } else { - InternalTimer(gettimeofday()+30, "DbLog_reopen", $hash, 0); + InternalTimer(gettimeofday()+30, "DbLog_reopen", $hash, 0); } return; @@ -5703,7 +5784,7 @@ sub DbLog_sampleDataFn($$$$$) { push (@example, join (" ",@line)) if($counter <= 8); # show max 8 examples push (@colregs, "$line[0]:$line[1]"); # push all eventTypes to selection list } - $dbhf->disconnect(); + $dbhf->disconnect(); my $cols = join(",", sort { "\L$a" cmp "\L$b" } @colregs); # $max = 8 if($max > 8); # auskommentiert 27.02.2018, Notwendigkeit unklar (forum:#76008) @@ -5727,7 +5808,7 @@ sub DbLog_sampleDataFn($$$$$) { my $ret = ""; no warnings 'uninitialized'; # Forum:74690, bug unitialized $ret .= SVG_txt("par_${r}_0", "", "$f[0]:$f[1]:$f[2]:$f[3]", 20); - use warnings; + use warnings; # $ret .= SVG_txt("par_${r}_2", "", $f[2], 1); # Default not yet implemented # $ret .= SVG_txt("par_${r}_3", "", $f[3], 3); # Function # $ret .= SVG_txt("par_${r}_4", "", $f[4], 3); # RegExp @@ -5964,7 +6045,7 @@ sub DbLog_chartQuery($@) { } my ($hash, @a) = @_; - my $dbhf = DbLog_ConnectNewDBH($hash); + my $dbhf = DbLog_ConnectNewDBH($hash); return if(!$dbhf); my $totalcount; @@ -6032,7 +6113,7 @@ sub DbLog_chartQuery($@) { } $jsonstring .= '}'; } - $dbhf->disconnect(); + $dbhf->disconnect(); 
$jsonstring .= ']'; if (defined $totalcount && $totalcount ne "") { $jsonstring .= ',"totalCount": '.$totalcount.'}'; @@ -6086,22 +6167,22 @@ sub DbLog_setVersionInfo($) { $hash->{HELPER}{VERSION} = $v; if($modules{$type}{META}{x_prereqs_src} && !$hash->{HELPER}{MODMETAABSENT}) { - # META-Daten sind vorhanden - $modules{$type}{META}{version} = "v".$v; # Version aus META.json überschreiben, Anzeige mit {Dumper $modules{DbLog}{META}} - if($modules{$type}{META}{x_version}) { # {x_version} ( nur gesetzt wenn $Id: 93_DbLog.pm 21051 2020-01-25 20:53:06Z DS_Starter $ im Kopf komplett! vorhanden ) - $modules{$type}{META}{x_version} =~ s/1.1.1/$v/g; - } else { - $modules{$type}{META}{x_version} = $v; - } - return $@ unless (FHEM::Meta::SetInternals($hash)); # FVERSION wird gesetzt ( nur gesetzt wenn $Id: 93_DbLog.pm 21051 2020-01-25 20:53:06Z DS_Starter $ im Kopf komplett! vorhanden ) - if(__PACKAGE__ eq "FHEM::$type" || __PACKAGE__ eq $type) { - # es wird mit Packages gearbeitet -> Perl übliche Modulversion setzen - # mit {->VERSION()} im FHEMWEB kann Modulversion abgefragt werden - use version 0.77; our $VERSION = FHEM::Meta::Get( $hash, 'version' ); + # META-Daten sind vorhanden + $modules{$type}{META}{version} = "v".$v; # Version aus META.json überschreiben, Anzeige mit {Dumper $modules{DbLog}{META}} + if($modules{$type}{META}{x_version}) { # {x_version} ( nur gesetzt wenn $Id: 93_DbLog.pm 21801 2020-04-29 09:37:37Z DS_Starter $ im Kopf komplett! vorhanden ) + $modules{$type}{META}{x_version} =~ s/1.1.1/$v/g; + } else { + $modules{$type}{META}{x_version} = $v; + } + return $@ unless (FHEM::Meta::SetInternals($hash)); # FVERSION wird gesetzt ( nur gesetzt wenn $Id: 93_DbLog.pm 21801 2020-04-29 09:37:37Z DS_Starter $ im Kopf komplett! 
vorhanden ) + if(__PACKAGE__ eq "FHEM::$type" || __PACKAGE__ eq $type) { + # es wird mit Packages gearbeitet -> Perl übliche Modulversion setzen + # mit {->VERSION()} im FHEMWEB kann Modulversion abgefragt werden + use version 0.77; our $VERSION = FHEM::Meta::Get( $hash, 'version' ); } } else { - # herkömmliche Modulstruktur - $hash->{VERSION} = $v; + # herkömmliche Modulstruktur + $hash->{VERSION} = $v; } return; @@ -6159,10 +6240,10 @@ sub DbLog_showChildHandles ($$$$) { The Perl-modules DBI and DBD::<dbtype> are needed to be installed (use cpan -i <module> if your distribution does not have it).

- - On a debian based system you may install these modules for instance by:

- -
    + + On a debian based system you may install these modules for instance by:

    + +
      @@ -6170,10 +6251,10 @@ sub DbLog_showChildHandles ($$$$) {
      DBI : sudo apt-get install libdbi-perl
      SQLite : sudo apt-get install sqlite3 libdbi-perl libdbd-sqlite3-perl
      PostgreSQL : sudo apt-get install libdbd-pg-perl
      -
    -
    -
    - +
+
+
+ Preparations

At first you need to install and setup the database. @@ -6198,21 +6279,21 @@ sub DbLog_showChildHandles ($$$$) { The columns have the following meaning:

-
    +
      - - - + + +
      TIMESTAMP : timestamp of event, e.g. 2007-12-30 21:45:22
      DEVICE : device name, e.g. Wetterstation
      TYPE : device type, e.g. KS300
      EVENT : event specification as full string, e.g. humidity: 71 (%)
      READING : name of reading extracted from event, e.g. humidity
      VALUE : actual reading extracted from event, e.g. 71
      UNIT : unit extracted from event, e.g. %
      READING : name of reading extracted from event, e.g. humidity
      VALUE : actual reading extracted from event, e.g. 71
      UNIT : unit extracted from event, e.g. %
      -
    -
    -
    - +
+
+
+ create index
Due to reading performance, e.g. on creation of SVG-plots, it is very important that the index "Search_Idx" or a comparable index (e.g. a primary key) is applied. @@ -6222,31 +6303,31 @@ sub DbLog_showChildHandles ($$$$) { The index "Search_Idx" can be created, e.g. in database 'fhem', by these statements (also subsequently):

-
    +
      MySQL : CREATE INDEX Search_Idx ON `fhem`.`history` (DEVICE, READING, TIMESTAMP);
      SQLite : CREATE INDEX Search_Idx ON `history` (DEVICE, READING, TIMESTAMP);
      PostgreSQL : CREATE INDEX "Search_Idx" ON history USING btree (device, reading, "timestamp");
      -
    -
    - +
+
+ For the connection to the database a configuration file is used. The configuration is stored in a separate file to avoid storing the password in the main configuration file and to have it visible in the output of the list command.

- + The configuration file should be copied e.g. to /opt/fhem and has the following structure you have to customize suitable to your conditions (decomment the appropriate raws and adjust it):

- -
+    
+    
     ####################################################################################
     # database configuration file     
-    # 	
+    #   
     # NOTE:
     # If you don't use a value for user / password please delete the leading hash mark
-    # and write 'user => ""' respectively 'password => ""' instead !	
+    # and write 'user => ""' respectively 'password => ""' instead !    
     #
     #
     ## for MySQL                                                      
@@ -6255,7 +6336,9 @@ sub DbLog_showChildHandles ($$$$) {
     #    connection => "mysql:database=fhem;host=<database host>;port=3306",       
     #    user => "fhemuser",                                          
     #    password => "fhempassword",
-    #    # optional enable(1) / disable(0) UTF-8 support (at least V 4.042 is necessary) 	
+    #    # optional enable(1) / disable(0) UTF-8 support 
+    #    # (full UTF-8 support exists from DBD::mysql version 4.032, but installing 
+    #    # 4.042 is highly suggested)   
     #    utf8 => 1   
     #);                                                              
     ####################################################################################
@@ -6277,14 +6360,14 @@ sub DbLog_showChildHandles ($$$$) {
     #    password => ""                                          
     #);                                                              
     ####################################################################################
-	
- If configDB is used, the configuration file has to be uploaded into the configDB !

- +
+ If configDB is used, the configuration file has to be uploaded into the configDB !

+ Note about special characters:
If special characters, e.g. @,$ or % which have a meaning in the perl programming language are used in a password, these special characters have to be escaped. That means in this example you have to use: \@,\$ respectively \%. -
+


@@ -6299,28 +6382,28 @@ sub DbLog_showChildHandles ($$$$) { <configfilename> is the prepared configuration file.
<regexp> is identical to the specification of regex in the FileLog definition.

- + Example:
    define myDbLog DbLog /etc/fhem/db.conf .*:.*
    all events will stored into the database
-
- - After you have defined your DbLog-device it is recommended to run the configuration check

+
+ + After you have defined your DbLog-device it is recommended to run the configuration check

    set <name> configCheck
-
- - This check reports some important settings and gives recommendations back to you if proposals are indentified. -

- - DbLog distinguishes between the synchronous (default) and asynchronous logmode. The logmode is adjustable by the - attribute asyncMode. Since version 2.13.5 DbLog is supporting primary key (PK) set in table - current or history. If you want use PostgreSQL with PK it has to be at lest version 9.5. +
+ + This check reports some important settings and gives recommendations back to you if proposals are identified.

- + + DbLog distinguishes between the synchronous (default) and asynchronous logmode. The logmode is adjustable by the + attribute asyncMode. Since version 2.13.5 DbLog is supporting primary key (PK) set in table + current or history. If you want to use PostgreSQL with PK it has to be at least version 9.5. +

+ The content of VALUE will be optimized for automated post-processing, e.g. yes is translated to 1

@@ -6329,21 +6412,21 @@ sub DbLog_showChildHandles ($$$$) { get myDbLog - - 2012-11-10 2012-11-10 KS300:temperature::
- - transfer FileLog-data to DbLog

- There is the special module 98_FileLogConvert.pm available to transfer filelog-data to the DbLog-database.
- The module can be downloaded here - or from directory ./contrib instead. - Further information and help you can find in the corresponding - Forumthread .


- - Reporting and Management of DbLog database content

- By using SVG database content can be visualized.
- Beyond that the module DbRep can be used to prepare tabular - database reports or you can manage the database content with available functions of that module. -


- Troubleshooting

+ transfer FileLog-data to DbLog

+ There is the special module 98_FileLogConvert.pm available to transfer filelog-data to the DbLog-database.
+ The module can be downloaded here + or from directory ./contrib instead. + Further information and help you can find in the corresponding + Forumthread .


+ + Reporting and Management of DbLog database content

+ By using SVG database content can be visualized.
+ Beyond that the module DbRep can be used to prepare tabular + database reports or you can manage the database content with available functions of that module. +


+ + Troubleshooting

If after successful definition the DbLog-device doesn't work as expected, the following notes may help:

@@ -6356,12 +6439,12 @@ sub DbLog_showChildHandles ($$$$) {
If the notes don't lead to success, please increase verbose level of the DbLog-device to 4 or 5 and observe entries in - logfile relating to the DbLog-device. + logfile relating to the DbLog-device. For problem analysis please post the output of "list <name>", the result of "set <name> configCheck" and the logfile entries of DbLog-device to the forum thread. -

- +

+

@@ -6374,8 +6457,8 @@ sub DbLog_showChildHandles ($$$$) {
    In asynchronous mode a new dataset is inserted to the Cache and will be processed at the next database sync cycle.

    - Example:
    - set <name> addCacheLine 2017-12-05 17:03:59|MaxBathRoom|MAX|valveposition: 95|valveposition|95|%
    + Example:
    + set <name> addCacheLine 2017-12-05 17:03:59|MaxBathRoom|MAX|valveposition: 95|valveposition|95|%

set <name> addLog <devspec>:<Reading> [Value] [CN=<caller name>] [!useExcludes]

@@ -6402,48 +6485,48 @@ sub DbLog_showChildHandles ($$$$) {
The database field "EVENT" will be filled with the string "addLog" automatically.
- The addLog-command dosn't create an additional event in your system !

+ The addLog-command doesn't create an additional event in your system !

- Examples:
- set <name> addLog SMA_Energymeter:Bezug_Wirkleistung
- set <name> addLog TYPE=SSCam:state
- set <name> addLog MyWetter:(fc10.*|fc8.*)
- set <name> addLog MyWetter:(wind|wind_ch.*) 20 !useExcludes
- set <name> addLog TYPE=CUL_HM:FILTER=model=HM-CC-RT-DN:FILTER=subType!=(virtual|):(measured-temp|desired-temp|actuator)

+ Examples:
+ set <name> addLog SMA_Energymeter:Bezug_Wirkleistung
+ set <name> addLog TYPE=SSCam:state
+ set <name> addLog MyWetter:(fc10.*|fc8.*)
+ set <name> addLog MyWetter:(wind|wind_ch.*) 20 !useExcludes
+ set <name> addLog TYPE=CUL_HM:FILTER=model=HM-CC-RT-DN:FILTER=subType!=(virtual|):(measured-temp|desired-temp|actuator)

set <name> addLog USV:state CN=di.cronjob
In the valueFn-function the caller "di.cronjob" is evaluated via the variable $CN and the timestamp is corrected:

valueFn = if($CN eq "di.cronjob" and $TIMESTAMP =~ m/\s00:00:[\d:]+/) { $TIMESTAMP =~ s/\s([^\s]+)/ 23:59:59/ }
- + set <name> clearReadings

    This function clears readings which were created by different DbLog-functions.

set <name> commitCache

    In asynchronous mode (attribute asyncMode=1), the cached data in memory will be written into the database - and subsequently the cache will be cleared. Thereby the internal timer for the asynchronous mode will be set anew. + and subsequently the cache will be cleared. Thereby the internal timer for the asynchronous mode will be set anew. The command can be useful in case you want to write the cached data manually or e.g. by an AT-device on a defined - point of time into the database.

+ point of time into the database.
set <name> configCheck

    This command checks some important settings and gives recommendations back to you if proposals are identified. -

- +
+ set <name> count

    Count records in tables current and history and write results into readings countCurrent and countHistory.

set <name> countNbl

    The non-blocking execution of "set <name> count".

- + set <name> deleteOldDays <n>

    Delete records from history older than <n> days. Number of deleted records will be written into reading lastRowsDeleted.

- + set <name> deleteOldDaysNbl <n>

    - Is identical to function "deleteOldDays" whereupon deleteOldDaysNbl will be executed non-blocking. + Is identical to function "deleteOldDays" whereupon deleteOldDaysNbl will be executed non-blocking.

    Note:
    @@ -6451,52 +6534,52 @@ sub DbLog_showChildHandles ($$$$) { avoid a blocking situation of FHEM !
-
+
set <name> eraseReadings

    This function deletes all readings except reading "state".

- + set <name> exportCache [nopurge | purgecache]

    If DbLog is operating in asynchronous mode, it's possible to export the cache content into a textfile. - The file will be written to the directory (global->modpath)/log/ by default setting. The destination directory can be - changed by the attribute expimpdir.
    - The filename will be generated automatically and is built by a prefix "cache_", followed by DbLog-devicename and the - present timestmp, e.g. "cache_LogDB_2017-03-23_22-13-55".
    + The file will be written to the directory (global->modpath)/log/ by default setting. The destination directory can be + changed by the attribute expimpdir.
    + The filename will be generated automatically and is built by a prefix "cache_", followed by DbLog-devicename and the + present timestamp, e.g. "cache_LogDB_2017-03-23_22-13-55".
    There are two options possible, "nopurge" respectively "purgecache". The option determines whether the cache content - will be deleted after export or not. - Using option "nopurge" (default) the cache content will be preserved.
    + will be deleted after export or not. + Using option "nopurge" (default) the cache content will be preserved.
    The attribute "exportCacheAppend" defines, whether every export process creates a new export file (default) or the cache content is appended to an existing (newest) export file.

- + set <name> importCachefile <file>

    Imports a textfile into the database which has been written by the "exportCache" function. - The allocatable files will be searched in directory (global->modpath)/log/ by default and a drop-down list will be + The allocatable files will be searched in directory (global->modpath)/log/ by default and a drop-down list will be generated from the files which are found in the directory. - The source directory can be changed by the attribute expimpdir.
    - Only that files will be shown which are correlate on pattern starting with "cache_", followed by the DbLog-devicename.
    - For example a file with the name "cache_LogDB_2017-03-23_22-13-55", will match if Dblog-device has name "LogDB".
    + The source directory can be changed by the attribute expimpdir.
    + Only those files will be shown which correlate to the pattern starting with "cache_", followed by the DbLog-devicename.
    + For example a file with the name "cache_LogDB_2017-03-23_22-13-55", will match if Dblog-device has name "LogDB".
    After the import has been successfully done, a prefix "impdone_" will be added at the beginning of the filename and this file doesn't appear on the drop-down list anymore.
    - If you want to import a cachefile from another source database, you may adapt the filename so it fits the search criteria + If you want to import a cachefile from another source database, you may adapt the filename so it fits the search criteria "DbLog-Device" in its name. After renaming, the file appears again on the drop-down list.

- set <name> listCache

+ set <name> listCache

    If DbLog is set to asynchronous mode (attribute asyncMode=1), you can use that command to list the events that are cached in memory.

set <name> purgeCache

    In asynchronous mode (attribute asyncMode=1), the in memory cached data will be deleted. With this command data won't be written from cache into the database.

- + set <name> reduceLog <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

    Reduces records older than <no> days and (optional) newer than <nn> days to one record (the 1st) each hour per device & reading.
    Within the device/reading name SQL-Wildcards "%" and "_" can be used.

    - With the optional argument 'average' not only the records will be reduced, but all numerical values of an hour - will be reduced to a single average.
    + With the optional argument 'average' not only the records will be reduced, but all numerical values of an hour + will be reduced to a single average.
    With the optional argument 'average=day' not only the records will be reduced, but all numerical values of a - day will be reduced to a single average. (implies 'average')

    + day will be reduced to a single average. (implies 'average')

    You can optional set the last argument to "exclude=device1:reading1,device2:reading2,..." to exclude device/readings from reduceLog.
    @@ -6510,10 +6593,10 @@ sub DbLog_showChildHandles ($$$$) {
    CAUTION: It is strongly recommended to check if the default INDEX 'Search_Idx' exists on the table 'history'!
    - The execution of this command may take (without INDEX) extremely long. FHEM will be blocked completely after issuing the command to completion !

    + The execution of this command may take (without INDEX) extremely long. FHEM will be blocked completely after issuing the command to completion !


- + set <name> reduceLogNbl <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

    Same function as "set <name> reduceLog" but FHEM won't be blocked due to this function is implemented non-blocking !

    @@ -6526,19 +6609,19 @@ sub DbLog_showChildHandles ($$$$) { set <name> reopen [n]

      Perform a database disconnect and immediate reconnect to clear cache and flush journal file if no time [n] was set.
      - If optionally a delay time of [n] seconds was set, the database connection will be disconnect immediately but it was only reopened - after [n] seconds. In synchronous mode the events won't saved during that time. In asynchronous mode the events will be - stored in the memory cache and saved into database after the reconnect was done.

    + If optionally a delay time of [n] seconds was set, the database connection will be disconnected immediately but only reopened + after [n] seconds. In synchronous mode the events won't be saved during that time. In asynchronous mode the events will be + stored in the memory cache and saved into database after the reconnect was done.

set <name> rereadcfg

    Perform a database disconnect and immediate reconnect to clear cache and flush journal file.
    Probably same behavior als reopen, but rereadcfg will read the configuration data before reconnect.

- + set <name> userCommand <validSqlStatement>

    Performs simple sql select statements on the connected database. Usercommand and result will be written into - corresponding readings.
    - The result can only be a single line. + corresponding readings.
    + The result can only be a single line. The execution of SQL-Statements in DbLog is deprecated. Therefore the analysis module DbRep should be used.

@@ -6737,14 +6820,14 @@ sub DbLog_showChildHandles ($$$$) {
  • addStateEvent
      - attr <device> addStateEvent [0|1] -
      + attr <device> addStateEvent [0|1] +
      As you probably know the event associated with the state Reading is special, as the "state: " string is stripped, i.e event is not "state: on" but just "on".
      - Mostly it is desireable to get the complete event without "state: " stripped, so it is the default behavior of DbLog. - That means you will get state-event complete as "state: xxx".
      - In some circumstances, e.g. older or special modules, it is a good idea to set addStateEvent to "0". - Try it if you have trouble with the default adjustment. + Mostly it is desireable to get the complete event without "state: " stripped, so it is the default behavior of DbLog. + That means you will get state-event complete as "state: xxx".
      + In some circumstances, e.g. older or special modules, it is a good idea to set addStateEvent to "0". + Try it if you have trouble with the default adjustment.
  • @@ -6755,18 +6838,18 @@ sub DbLog_showChildHandles ($$$$) {
  • asyncMode
      - attr <device> asyncMode [1|0] -
      - + attr <device> asyncMode [1|0] +
      + This attribute determines the operation mode of DbLog. If asynchronous mode is active (asyncMode=1), the events which should be saved - at first will be cached in memory. After synchronisation time cycle (attribute syncInterval), or if the count limit of datasets in cache - is reached (attribute cacheLimit), the cached events get saved into the database using bulk insert. - If the database isn't available, the events will be cached in memeory furthermore, and tried to save into database again after - the next synchronisation time cycle if the database is available.
      - In asynchronous mode the data insert into database will be executed non-blocking by a background process. - You can adjust the timeout value for this background process by attribute "timeout" (default 86400s).
      - In synchronous mode (normal mode) the events won't be cached im memory and get saved into database immediately. If the database isn't - available the events are get lost.
      + at first will be cached in memory. After synchronisation time cycle (attribute syncInterval), or if the count limit of datasets in cache + is reached (attribute cacheLimit), the cached events get saved into the database using bulk insert. + If the database isn't available, the events will be cached in memeory furthermore, and tried to save into database again after + the next synchronisation time cycle if the database is available.
      + In asynchronous mode the data insert into database will be executed non-blocking by a background process. + You can adjust the timeout value for this background process by attribute "timeout" (default 86400s).
      + In synchronous mode (normal mode) the events won't be cached im memory and get saved into database immediately. If the database isn't + available the events are get lost.
  • @@ -6776,9 +6859,9 @@ sub DbLog_showChildHandles ($$$$) {
  • bulkInsert
      - attr <device> bulkInsert [1|0] -
      - + attr <device> bulkInsert [1|0] +
      + Toggles the Insert mode between Array (default) and Bulk. This Bulk insert mode increase the write performance into the history table significant in case of plenty of data to insert, especially if asynchronous mode is used. @@ -6793,21 +6876,21 @@ sub DbLog_showChildHandles ($$$$) {
    • commitMode
        - attr <device> commitMode [basic_ta:on | basic_ta:off | ac:on_ta:on | ac:on_ta:off | ac:off_ta:on] -
        - + attr <device> commitMode [basic_ta:on | basic_ta:off | ac:on_ta:on | ac:on_ta:off | ac:off_ta:on] +
        + Change the usage of database autocommit- and/or transaction- behavior.
        If transaction "off" is used, not saved datasets are not returned to cache in asynchronous mode.
        - This attribute is an advanced feature and should only be used in a concrete situation or support case.

        - -
          + This attribute is an advanced feature and should only be used in a concrete situation or support case.

          + +
          • basic_ta:on - autocommit server basic setting / transaktion on (default)
          • -
          • basic_ta:off - autocommit server basic setting / transaktion off
          • -
          • ac:on_ta:on - autocommit on / transaktion on
          • -
          • ac:on_ta:off - autocommit on / transaktion off
          • -
          • ac:off_ta:on - autocommit off / transaktion on (autocommit "off" set transaktion "on" implicitly)
          • -
          - +
        • basic_ta:off - autocommit server basic setting / transaktion off
        • +
        • ac:on_ta:on - autocommit on / transaktion on
        • +
        • ac:on_ta:off - autocommit on / transaktion off
        • +
        • ac:off_ta:on - autocommit off / transaktion on (autocommit "off" set transaktion "on" implicitly)
        • +
        +
    @@ -6817,14 +6900,14 @@ sub DbLog_showChildHandles ($$$$) {
  • cacheEvents
      - attr <device> cacheEvents [2|1|0] -
      -
        + attr <device> cacheEvents [2|1|0] +
        +
        • cacheEvents=1: creates events of reading CacheUsage at point of time when a new dataset has been added to the cache.
        • -
        • cacheEvents=2: creates events of reading CacheUsage at point of time when in aychronous mode a new write cycle to the - database starts. In that moment CacheUsage contains the number of datasets which will be written to - the database.

        • -
        +
      • cacheEvents=2: creates events of reading CacheUsage at point of time when in asynchronous mode a new write cycle to the + database starts. In that moment CacheUsage contains the number of datasets which will be written to + the database.

      • +
  • @@ -6834,13 +6917,13 @@ sub DbLog_showChildHandles ($$$$) {
  • cacheLimit
      - - attr <device> cacheLimit <n> -
      - + + attr <device> cacheLimit <n> +
      + In asynchronous logging mode the content of cache will be written into the database and cleared if the number <n> datasets - in cache has reached (default: 500). Thereby the timer of asynchronous logging mode will be set new to the value of - attribute "syncInterval". In case of error the next write attempt will be started at the earliest after syncInterval/2.
      + in cache has reached (default: 500). Thereby the timer of asynchronous logging mode will be set new to the value of + attribute "syncInterval". In case of error the next write attempt will be started at the earliest after syncInterval/2.
  • @@ -6850,15 +6933,15 @@ sub DbLog_showChildHandles ($$$$) {
  • colEvent
      - - attr <device> colEvent <n> -
      - - The field length of database field EVENT will be adjusted. By this attribute the default value in the DbLog-device can be - adjusted if the field length in the databse was changed nanually. If colEvent=0 is set, the database field - EVENT won't be filled .
      - Note:
      - If the attribute is set, all of the field length limits are valid also for SQLite databases as noticed in Internal COLUMNS !
      + + attr <device> colEvent <n> +
      + + The field length of database field EVENT will be adjusted. By this attribute the default value in the DbLog-device can be + adjusted if the field length in the database was changed manually. If colEvent=0 is set, the database field + EVENT won't be filled .
      + Note:
      + If the attribute is set, all of the field length limits are valid also for SQLite databases as noticed in Internal COLUMNS !
  • @@ -6868,15 +6951,15 @@ sub DbLog_showChildHandles ($$$$) {
  • colReading
      - - attr <device> colReading <n> -
      - - The field length of database field READING will be adjusted. By this attribute the default value in the DbLog-device can be - adjusted if the field length in the databse was changed nanually. If colReading=0 is set, the database field - READING won't be filled .
      - Note:
      - If the attribute is set, all of the field length limits are valid also for SQLite databases as noticed in Internal COLUMNS !
      + + attr <device> colReading <n> +
      + + The field length of database field READING will be adjusted. By this attribute the default value in the DbLog-device can be + adjusted if the field length in the database was changed manually. If colReading=0 is set, the database field + READING won't be filled .
      + Note:
      + If the attribute is set, all of the field length limits are valid also for SQLite databases as noticed in Internal COLUMNS !
  • @@ -6886,15 +6969,15 @@ sub DbLog_showChildHandles ($$$$) {
  • colValue
      - - attr <device> colValue <n> -
      - - The field length of database field VALUE will be adjusted. By this attribute the default value in the DbLog-device can be - adjusted if the field length in the databse was changed nanually. If colEvent=0 is set, the database field - VALUE won't be filled .
      - Note:
      - If the attribute is set, all of the field length limits are valid also for SQLite databases as noticed in Internal COLUMNS !
      + + attr <device> colValue <n> +
      + + The field length of database field VALUE will be adjusted. By this attribute the default value in the DbLog-device can be + adjusted if the field length in the database was changed manually. If colValue=0 is set, the database field + VALUE won't be filled .
      + Note:
      + If the attribute is set, all of the field length limits are valid also for SQLite databases as noticed in Internal COLUMNS !
  • @@ -6904,36 +6987,36 @@ sub DbLog_showChildHandles ($$$$) {
  • DbLogType
      - - attr <device> DbLogType [Current|History|Current/History] -
      - + + attr <device> DbLogType [Current|History|Current/History] +
      + This attribute determines which table or which tables in the database are wanted to use. If the attribute isn't set, - the adjustment history will be used as default.
      - - - The meaning of the adjustments in detail are:

      - -
        + the adjustment history will be used as default.
        + + + The meaning of the adjustments in detail are:

        + +
          + The entries of current-table will evaluated with SVG-creation. + SVG-creation. - + + DbRep-device command + "set <DbRep-name> tableCurrentFillup" (advanced feature).
          Current Events are only logged into the current-table. - The entries of current-table will evaluated with SVG-creation.
          History Events are only logged into the history-table. No dropdown list with proposals will created with the - SVG-creation.
          Current/History Events will be logged both the current- and the history-table. - The entries of current-table will evaluated with SVG-creation.
          SampleFill/History Events are only logged into the history-table. The entries of current-table will evaluated with SVG-creation + The entries of current-table will evaluated with SVG-creation.
          SampleFill/History Events are only logged into the history-table. The entries of current-table will evaluated with SVG-creation and can be filled up with a customizable extract of the history-table by using a - DbRep-device command - "set <DbRep-name> tableCurrentFillup" (advanced feature).
          -
        -
        -
        - - Note:
        - The current-table has to be used to get a Device:Reading-DropDown list when a SVG-Plot will be created.
        +
      +
      +
      + + Note:
      + The current-table has to be used to get a Device:Reading-DropDown list when a SVG-Plot will be created.
  • @@ -6943,10 +7026,10 @@ sub DbLog_showChildHandles ($$$$) {
  • DbLogSelectionMode
      - - attr <device> DbLogSelectionMode [Exclude|Include|Exclude/Include] -
      - + + attr <device> DbLogSelectionMode [Exclude|Include|Exclude/Include] +
      + + This DbLog-Device-Attribute specifies how the device specific Attributes DbLogExclude and DbLogInclude are handled. If this Attribute is missing it defaults to "Exclude".

        @@ -6971,7 +7054,7 @@ sub DbLog_showChildHandles ($$$$) { attr <device> DbLogInclude regex:MinInterval[:force],[regex:MinInterval[:force]] ...
        - + A new Attribute DbLogInclude will be propagated to all Devices if DBLog is used. DbLogInclude works just like DbLogExclude but to include matching readings. If a MinInterval is set, the logentry is dropped if the defined interval is not reached and the value vs. @@ -6980,8 +7063,8 @@ sub DbLog_showChildHandles ($$$$) { See also the attributes defaultMinInterval and DbLogSelectionMode of DbLog device which takes influence on how DbLogExclude and DbLogInclude are handled.

        - - Example
        + + Example
        attr MyDevice1 DbLogInclude .*
        attr MyDevice2 DbLogInclude state,(floorplantext|MyUserReading):300,battery:3600
        attr MyDevice2 DbLogInclude state,(floorplantext|MyUserReading):300:force,battery:3600:force @@ -6997,9 +7080,9 @@ sub DbLog_showChildHandles ($$$$) { attr <device> DbLogExclude regex:MinInterval[:force],[regex:MinInterval[:force]] ...
        - + A new attribute DbLogExclude will be propagated to all devices if DBLog is used. - DbLogExclude will work as regexp to exclude defined readings to log. Each individual regexp-group are separated by + DbLogExclude will work as regexp to exclude defined readings to log. Each individual regexp-group are separated by comma. If a MinInterval is set, the logentry is dropped if the defined interval is not reached and the value vs. lastvalue is equal. If the optional parameter "force" is set, the logentry is also dropped even though the value is not @@ -7008,7 +7091,7 @@ sub DbLog_showChildHandles ($$$$) { how DbLogExclude and DbLogInclude are handled.

        - Example
        + Example
        attr MyDevice1 DbLogExclude .*
        attr MyDevice2 DbLogExclude state,(floorplantext|MyUserReading):300,battery:3600
        attr MyDevice2 DbLogExclude state,(floorplantext|MyUserReading):300:force,battery:3600:force @@ -7021,24 +7104,25 @@ sub DbLog_showChildHandles ($$$$) {
      • DbLogValueFn
          - - attr <device> DbLogValueFn {} -
          + + attr <device> DbLogValueFn {} +
          The attribute DbLogValueFn will be propagated to all devices if DbLog is used. - This attribute contains a Perl expression that can use and change values of $TIMESTAMP, $READING, $VALUE (value of - reading) and $UNIT (unit of reading value). That means the changed values are logged. - You also have readonly access to $DEVICE (the source device name) and $EVENT for evaluation in your expression.
          - $DEVICE and $EVENT cannot be changed.
          - If $TIMESTAMP should be changed, it must meet the condition "yyyy-mm-dd hh:mm:ss", otherwise the $timestamp wouldn't - be changed. - In addition you can set the variable $IGNORE=1 if you want skip a dataset from logging.
          + This attribute contains a Perl expression that can use and change values of $TIMESTAMP, $READING, $VALUE (value of + reading) and $UNIT (unit of reading value). That means the changed values are logged.
          + Furthermore you have readonly access to $DEVICE (the source device name), $EVENT, $LASTTIMESTAMP and $LASTVALUE + for evaluation in your expression. + The variables $LASTTIMESTAMP and $LASTVALUE contain time and value of the last logged dataset of $DEVICE / $READING.
          + If the $TIMESTAMP is to be changed, it must meet the condition "yyyy-mm-dd hh:mm:ss", otherwise the $timestamp wouldn't + be changed. + In addition you can set the variable $IGNORE=1 if you want skip a dataset from logging.
          The device specific function in "DbLogValueFn" is applied to the dataset before the potential existing attribute "valueFn" in the DbLog device.

          - - Example
          + + Example
           attr SMA_Energymeter DbLogValueFn
           { 
          @@ -7049,7 +7133,7 @@ attr SMA_Energymeter DbLogValueFn
               $IGNORE=1;
             }
           }
          -	   
          +
      @@ -7061,7 +7145,7 @@ attr SMA_Energymeter DbLogValueFn attr <device> dbSchema <schema>
      - + This attribute is available for database types MySQL/MariaDB and PostgreSQL. The table names (current/history) are extended by its database schema. It is an advanced feature and normally not necessary to set.
      @@ -7087,13 +7171,13 @@ attr SMA_Energymeter DbLogValueFn and are not overwritten by this attribute.
      This attribute can be specified as multiline input.

      - Examples
      + Examples
      attr dblog defaultMinInterval .*::120::force
      - # Events of all devices are logged only in case of 120 seconds are elapsed to the last log entry (reading specific) independent of a possible value change.
      + # Events of all devices are logged only in case of 120 seconds are elapsed to the last log entry (reading specific) independent of a possible value change.
      attr dblog defaultMinInterval (Weather|SMA)::300
      - # Events of devices "Weather" and "SMA" are logged only in case of 300 seconds are elapsed to the last log entry (reading specific) and the value is equal to the last logged value.
      + # Events of devices "Weather" and "SMA" are logged only in case of 300 seconds are elapsed to the last log entry (reading specific) and the value is equal to the last logged value.
      attr dblog defaultMinInterval TYPE=CUL_HM::600::force
      - # Events of all devices of Type "CUL_HM" are logged only in case of 600 seconds are elapsed to the last log entry (reading specific) independent of a possible value change. + # Events of all devices of Type "CUL_HM" are logged only in case of 600 seconds are elapsed to the last log entry (reading specific) independent of a possible value change.
  • @@ -7106,7 +7190,7 @@ attr SMA_Energymeter DbLogValueFn attr <device> disable [0|1]
    - + Disables the DbLog device (1) or enables it (0).
    @@ -7118,31 +7202,31 @@ attr SMA_Energymeter DbLogValueFn
  • excludeDevs
      - - attr <device> excludeDevs <devspec1>[#Reading],<devspec2>[#Reading],<devspec...> -
      + + attr <device> excludeDevs <devspec1>[#Reading],<devspec2>[#Reading],<devspec...> +
      - The device/reading-combinations "devspec1#Reading", "devspec2#Reading" up to "devspec.." are globally excluded from + The device/reading-combinations "devspec1#Reading", "devspec2#Reading" up to "devspec.." are globally excluded from logging into the database.
      The specification of a reading is optional.
      - Thereby devices are explicit and consequently excluded from logging without consideration of another excludes or + Thereby devices are explicit and consequently excluded from logging without consideration of another excludes or includes (e.g. in DEF). The devices to exclude can be specified as device-specification.

      - - Examples
      + + Examples
      - attr <device> excludeDevs global,Log.*,Cam.*,TYPE=DbLog -
      - # The devices global respectively devices starting with "Log" or "Cam" and devices with Type=DbLog are excluded from database logging.
      + attr <device> excludeDevs global,Log.*,Cam.*,TYPE=DbLog +
      + # The devices global respectively devices starting with "Log" or "Cam" and devices with Type=DbLog are excluded from database logging.
      - attr <device> excludeDevs .*#.*Wirkleistung.* -
      - # All device/reading-combinations which contain "Wirkleistung" in reading are excluded from logging.
      + attr <device> excludeDevs .*#.*Wirkleistung.* +
      + # All device/reading-combinations which contain "Wirkleistung" in reading are excluded from logging.
      attr <device> excludeDevs SMA_Energymeter#Bezug_WirkP_Zaehler_Diff -
      - # The event containing device "SMA_Energymeter" and reading "Bezug_WirkP_Zaehler_Diff" are excluded from logging.
      +
      + # The event containing device "SMA_Energymeter" and reading "Bezug_WirkP_Zaehler_Diff" are excluded from logging.
  • @@ -7152,19 +7236,19 @@ attr SMA_Energymeter DbLogValueFn
  • expimpdir
      - - attr <device> expimpdir <directory> -
      + + attr <device> expimpdir <directory> +
      - If the cache content will be exported by "exportCache" or the "importCachefile" - command, the file will be written into or read from that directory. The default directory is - "(global->modpath)/log/". - Make sure the specified directory is existing and writable.

      - - Example
      + If the cache content will be exported by "exportCache" or the "importCachefile" + command, the file will be written into or read from that directory. The default directory is + "(global->modpath)/log/". + Make sure the specified directory is existing and writable.

      + + Example
      - attr <device> expimpdir /opt/fhem/cache/ -
      + attr <device> expimpdir /opt/fhem/cache/ +
  • @@ -7174,10 +7258,10 @@ attr SMA_Energymeter DbLogValueFn
  • exportCacheAppend
      - - attr <device> exportCacheAppend [1|0] -
      - + + attr <device> exportCacheAppend [1|0] +
      + If set, the export of cache ("set <device> exportCache") appends the content to the newest available export file. If there is no exististing export file, it will be new created.
      If the attribute not set, every export process creates a new export file . (default)
      @@ -7190,10 +7274,10 @@ attr SMA_Energymeter DbLogValueFn
    • noNotifyDev
        - - attr <device> noNotifyDev [1|0] -
        - + + attr <device> noNotifyDev [1|0] +
        + Enforces that NOTIFYDEV won't set and hence won't used.
    • @@ -7204,10 +7288,10 @@ attr SMA_Energymeter DbLogValueFn
    • noSupportPK
        - - attr <device> noSupportPK [1|0] -
        - + + attr <device> noSupportPK [1|0] +
        + Deactivates the support of a set primary key by the module.
    • @@ -7218,9 +7302,9 @@ attr SMA_Energymeter DbLogValueFn
    • syncEvents
        - attr <device> syncEvents [1|0] -
        - + attr <device> syncEvents [1|0] +
        + events of reading syncEvents will be created.
    • @@ -7231,12 +7315,12 @@ attr SMA_Energymeter DbLogValueFn
    • showproctime
        - attr <device> [1|0] -
        - + attr <device> [1|0] +
        + If set, the reading "sql_processing_time" shows the required execution time (in seconds) for the sql-requests. This is not calculated - for a single sql-statement, but the summary of all sql-statements necessary for within an executed DbLog-function in background. - The reading "background_processing_time" shows the total time used in background.
        + for a single sql-statement, but the summary of all sql-statements necessary for within an executed DbLog-function in background. + The reading "background_processing_time" shows the total time used in background.
    @@ -7246,13 +7330,13 @@ attr SMA_Energymeter DbLogValueFn
  • showNotifyTime
      - attr <device> showNotifyTime [1|0] -
      - - If set, the reading "notify_processing_time" shows the required execution time (in seconds) in the DbLog - Notify-function. This attribute is practical for performance analyses and helps to determine the differences of time + attr <device> showNotifyTime [1|0] +
      + + If set, the reading "notify_processing_time" shows the required execution time (in seconds) in the DbLog + Notify-function. This attribute is practical for performance analyses and helps to determine the differences of time required when the operation mode was switched from synchronous to the asynchronous mode.
      - +
  • @@ -7262,12 +7346,12 @@ attr SMA_Energymeter DbLogValueFn
  • syncInterval
      - attr <device> syncInterval <n> -
      - + attr <device> syncInterval <n> +
      + If DbLog is set to asynchronous operation mode (attribute asyncMode=1), with this attribute you can setup the interval in seconds used for storage the in memory cached events into the database. THe default value is 30 seconds.
      - +
  • @@ -7277,9 +7361,9 @@ attr SMA_Energymeter DbLogValueFn
  • suppressAddLogV3
      - attr <device> suppressAddLogV3 [1|0] -
      - + attr <device> suppressAddLogV3 [1|0] +
      + If set, verbose 3 Logfileentries done by the addLog-function will be suppressed.
  • @@ -7290,12 +7374,12 @@ attr SMA_Energymeter DbLogValueFn
  • suppressUndef
      - - attr <device> suppressUndef -
      + + attr <device> suppressUndef +
      suppresses all undef values when returning data from the DB via get

      - Example
      + Example
      #DbLog eMeter:power:::$val=($val>1500)?undef:$val
  • @@ -7306,9 +7390,9 @@ attr SMA_Energymeter DbLogValueFn
  • timeout
      - - attr <device> timeout <n> -
      + + attr <device> timeout <n> +
      setup timeout of the write cycle into database in asynchronous mode (default 86400s)
  • @@ -7319,13 +7403,13 @@ attr SMA_Energymeter DbLogValueFn
  • traceFlag
      - - attr <device> traceFlag <ALL|SQL|CON|ENC|DBD|TXN> -
      + + attr <device> traceFlag <ALL|SQL|CON|ENC|DBD|TXN> +
      Trace flags are used to enable tracing of specific activities within the DBI and drivers. The attribute is only used for tracing of errors in case of support.

      -
        +
          @@ -7336,8 +7420,8 @@ attr SMA_Energymeter DbLogValueFn
          ALL turn on all DBI and driver flags
          TXN trace transactions
          -
        -
        +
      +
  • @@ -7348,12 +7432,12 @@ attr SMA_Energymeter DbLogValueFn
  • traceHandles
      - attr <device> traceHandles <n> -
      - + attr <device> traceHandles <n> +
      + If set, every <n> seconds the system wide existing database handles are printed out into the logfile. This attribute is only relevant in case of support. (default: 0 = switch off)
      - +
  • @@ -7363,14 +7447,14 @@ attr SMA_Energymeter DbLogValueFn
  • traceLevel
      - - attr <device> traceLevel <0|1|2|3|4|5|6|7> -
      + + attr <device> traceLevel <0|1|2|3|4|5|6|7> +
      Switch on the tracing function of the module.
      Caution ! The attribute is only used for tracing errors or in case of support. If switched on very much entries will be written into the FHEM Logfile !

      -
        +
          @@ -7382,8 +7466,8 @@ attr SMA_Energymeter DbLogValueFn
          0 Trace disabled. (Default)
          5-7 As above but with more and more internal information.
          -
        -
        +
      +
  • @@ -7394,12 +7478,12 @@ attr SMA_Energymeter DbLogValueFn
  • useCharfilter
      - - attr <device> useCharfilter [0|1] -
      + + attr <device> useCharfilter [0|1] +
      If set, only ASCII characters from 32 to 126 are accepted in event. - That are the characters " A-Za-z0-9!"#$%&'()*+,-.\/:;<=>?@[\\]^_`{|}~" .
      - Mutated vowel and "€" are transcribed (e.g. ä to ae). (default: 0).
      + That are the characters " A-Za-z0-9!"#$%&'()*+,-.\/:;<=>?@[\\]^_`{|}~" .
      + Mutated vowel and "€" are transcribed (e.g. ä to ae). (default: 0).
  • @@ -7409,30 +7493,31 @@ attr SMA_Energymeter DbLogValueFn
  • valueFn
      - - attr <device> valueFn {} -
      + + attr <device> valueFn {} +
      - The attribute contains a Perl expression that can use and change values of $TIMESTAMP, $DEVICE, $DEVICETYPE, $READING, - $VALUE (value of reading) and $UNIT (unit of reading value). - You also have readonly-access to $EVENT for evaluation in your expression.
      - If $TIMESTAMP should be changed, it must meet the condition "yyyy-mm-dd hh:mm:ss", otherwise the $timestamp wouldn't - be changed. - In addition you can set the variable $IGNORE=1 if you want skip a dataset from logging.

      - - Examples
      + The attribute contains a Perl expression that can use and change values of $TIMESTAMP, $DEVICE, $DEVICETYPE, $READING, + $VALUE (value of reading) and $UNIT (unit of reading value).
      + Furthermore you have readonly access to $EVENT, $LASTTIMESTAMP and $LASTVALUE for evaluation in your expression. + The variables $LASTTIMESTAMP and $LASTVALUE contain time and value of the last logged dataset of $DEVICE / $READING.
      + If $TIMESTAMP is to be changed, it must meet the condition "yyyy-mm-dd hh:mm:ss", otherwise the $timestamp wouldn't + be changed. + In addition you can set the variable $IGNORE=1 if you want skip a dataset from logging.

      + + Examples
      - attr <device> valueFn {if ($DEVICE eq "living_Clima" && $VALUE eq "off" ){$VALUE=0;} elsif ($DEVICE eq "e-power"){$VALUE= sprintf "%.1f", $VALUE;}} -
      - # change value "off" to "0" of device "living_Clima" and rounds value of e-power to 1f

      - - attr <device> valueFn {if ($DEVICE eq "SMA_Energymeter" && $READING eq "state"){$IGNORE=1;}} -
      - # don't log the dataset of device "SMA_Energymeter" if the reading is "state"

      - - attr <device> valueFn {if ($DEVICE eq "Dum.Energy" && $READING eq "TotalConsumption"){$UNIT="W";}} -
      - # set the unit of device "Dum.Energy" to "W" if reading is "TotalConsumption"

      + attr <device> valueFn {if ($DEVICE eq "living_Clima" && $VALUE eq "off" ){$VALUE=0;} elsif ($DEVICE eq "e-power"){$VALUE= sprintf "%.1f", $VALUE;}} +
      + # change value "off" to "0" of device "living_Clima" and rounds value of e-power to 1f

      + + attr <device> valueFn {if ($DEVICE eq "SMA_Energymeter" && $READING eq "state"){$IGNORE=1;}} +
      + # don't log the dataset of device "SMA_Energymeter" if the reading is "state"

      + + attr <device> valueFn {if ($DEVICE eq "Dum.Energy" && $READING eq "TotalConsumption"){$UNIT="W";}} +
      + # set the unit of device "Dum.Energy" to "W" if reading is "TotalConsumption"

  • @@ -7442,20 +7527,20 @@ attr SMA_Energymeter DbLogValueFn
  • verbose4Devs
      - - attr <device> verbose4Devs <device1>,<device2>,<device..> -
      + + attr <device> verbose4Devs <device1>,<device2>,<device..> +
      - If verbose level 4 is used, only output of devices set in this attribute will be reported in FHEM central logfile. If this attribute - isn't set, output of all relevant devices will be reported if using verbose level 4. - The given devices are evaluated as Regex.

      - - Example
      + If verbose level 4 is used, only output of devices set in this attribute will be reported in FHEM central logfile. If this attribute + isn't set, output of all relevant devices will be reported if using verbose level 4. + The given devices are evaluated as Regex.

      + + Example
      - attr <device> verbose4Devs sys.*,.*5000.*,Cam.*,global -
      - # The devices starting with "sys", "Cam" respectively devices are containing "5000" in its name and the device "global" will be reported in FHEM - central Logfile if verbose=4 is set.
      + attr <device> verbose4Devs sys.*,.*5000.*,Cam.*,global +
      + # The devices starting with "sys", "Cam" respectively devices are containing "5000" in its name and the device "global" will be reported in FHEM + central Logfile if verbose=4 is set.
  • @@ -7477,10 +7562,10 @@ attr SMA_Energymeter DbLogValueFn Die Perl-Module DBI und DBD::<dbtype> müssen installiert werden (use cpan -i <module> falls die eigene Distribution diese nicht schon mitbringt).

    - - Auf einem Debian-System können diese Module z.Bsp. installiert werden mit:

    - -
      + + Auf einem Debian-System können diese Module z.Bsp. installiert werden mit:

      + +
        @@ -7488,10 +7573,10 @@ attr SMA_Energymeter DbLogValueFn
        DBI : sudo apt-get install libdbi-perl
        SQLite : sudo apt-get install sqlite3 libdbi-perl libdbd-sqlite3-perl
        PostgreSQL : sudo apt-get install libdbd-pg-perl
        -
      -
      -
      - +
    +
    +
    + Vorbereitungen

    Zunächst muss die Datenbank installiert und angelegt werden. @@ -7518,57 +7603,57 @@ attr SMA_Energymeter DbLogValueFn Die Tabellenspalten haben folgende Bedeutung:

    -
      +
        - - - + + +
        TIMESTAMP : Zeitpunkt des Events, z.B. 2007-12-30 21:45:22
        DEVICE : Name des Devices, z.B. Wetterstation
        TYPE : Type des Devices, z.B. KS300
        EVENT : das auftretende Event als volle Zeichenkette, z.B. humidity: 71 (%)
        READING : Name des Readings, ermittelt aus dem Event, z.B. humidity
        VALUE : aktueller Wert des Readings, ermittelt aus dem Event, z.B. 71
        UNIT : Einheit, ermittelt aus dem Event, z.B. %
        READING : Name des Readings, ermittelt aus dem Event, z.B. humidity
        VALUE : aktueller Wert des Readings, ermittelt aus dem Event, z.B. 71
        UNIT : Einheit, ermittelt aus dem Event, z.B. %
        -
      -
      -
      - +
    +
    +
    + Index anlegen
    Für die Leseperformance, z.B. bei der Erstellung von SVG-PLots, ist es von besonderer Bedeutung dass der Index "Search_Idx" oder ein vergleichbarer Index (z.B. ein Primary Key) angelegt ist.

    Der Index "Search_Idx" kann mit diesen Statements, z.B. in der Datenbank 'fhem', angelegt werden (auch nachträglich):

    -
      +
        MySQL : CREATE INDEX Search_Idx ON `fhem`.`history` (DEVICE, READING, TIMESTAMP);
        SQLite : CREATE INDEX Search_Idx ON `history` (DEVICE, READING, TIMESTAMP);
        PostgreSQL : CREATE INDEX "Search_Idx" ON history USING btree (device, reading, "timestamp");
        -
      -
      +
    +
    Der Code zur Anlage ist ebenfalls in den Scripten SVN -> contrib/dblog/db_create_<DBType>.sql enthalten.

    - + Für die Verbindung zur Datenbank wird eine Konfigurationsdatei verwendet. Die Konfiguration ist in einer sparaten Datei abgelegt um das Datenbankpasswort nicht in Klartext in der FHEM-Haupt-Konfigurationsdatei speichern zu müssen. Ansonsten wäre es mittels des list Befehls einfach auslesbar.

    - + Die Konfigurationsdatei wird z.B. nach /opt/fhem kopiert und hat folgenden Aufbau, den man an seine Umgebung anpassen muß (entsprechende Zeilen entkommentieren und anpassen):

    - -
    +    
    +    
         ####################################################################################
         # database configuration file     
    -    # 	
    +    #   
         # NOTE:
         # If you don't use a value for user / password please delete the leading hash mark
    -    # and write 'user => ""' respectively 'password => ""' instead !	
    +    # and write 'user => ""' respectively 'password => ""' instead !    
         #
         #
         ## for MySQL                                                      
    @@ -7577,7 +7662,9 @@ attr SMA_Energymeter DbLogValueFn
         #    connection => "mysql:database=fhem;host=<database host>;port=3306",    
         #    user => "fhemuser",                                          
         #    password => "fhempassword",
    -    #    # optional enable(1) / disable(0) UTF-8 support (at least V 4.042 is necessary) 	
    +    #    # optional enable(1) / disable(0) UTF-8 support 
    +    #    # (full UTF-8 support exists from DBD::mysql version 4.032, but installing 
    +    #    # 4.042 is highly suggested)    
         #    utf8 => 1   
         #);                                                              
         ####################################################################################
    @@ -7599,14 +7686,14 @@ attr SMA_Energymeter DbLogValueFn
         #    password => ""                                          
         #);                                                              
         ####################################################################################
    -	
    - Wird configDB genutzt, ist das Konfigurationsfile in die configDB hochzuladen !

    +
    + Wird configDB genutzt, ist das Konfigurationsfile in die configDB hochzuladen !

    Hinweis zu Sonderzeichen:
    Werden Sonderzeichen, wie z.B. @, $ oder %, welche eine programmtechnische Bedeutung in Perl haben im Passwort verwendet, sind diese Zeichen zu escapen. Das heißt in diesem Beispiel wäre zu verwenden: \@,\$ bzw. \%. -
    +


    @@ -7614,60 +7701,60 @@ attr SMA_Energymeter DbLogValueFn Define

      - + define <name> DbLog <configfilename> <regexp>

      <configfilename> ist die vorbereitete Konfigurationsdatei.
      <regexp> ist identisch FileLog der Filelog-Definition.

      - + Beispiel:
        define myDbLog DbLog /etc/fhem/db.conf .*:.*
        speichert alles in der Datenbank
      -
      - - Nachdem das DbLog-Device definiert wurde, ist empfohlen einen Konfigurationscheck auszuführen:

      +
      + + Nachdem das DbLog-Device definiert wurde, ist empfohlen einen Konfigurationscheck auszuführen:

        set <name> configCheck
      -
      - Dieser Check prüft einige wichtige Einstellungen des DbLog-Devices und gibt Empfehlungen für potentielle Verbesserungen. -

      -
      - - DbLog unterscheidet den synchronen (Default) und asynchronen Logmodus. Der Logmodus ist über das - Attribut asyncMode einstellbar. Ab Version 2.13.5 unterstützt DbLog einen gesetzten - Primary Key (PK) in den Tabellen Current und History. Soll PostgreSQL mit PK genutzt werden, muss PostgreSQL mindestens - Version 9.5 sein. +
      + Dieser Check prüft einige wichtige Einstellungen des DbLog-Devices und gibt Empfehlungen für potentielle Verbesserungen. +

      +
      + + DbLog unterscheidet den synchronen (Default) und asynchronen Logmodus. Der Logmodus ist über das + Attribut asyncMode einstellbar. Ab Version 2.13.5 unterstützt DbLog einen gesetzten + Primary Key (PK) in den Tabellen Current und History. Soll PostgreSQL mit PK genutzt werden, muss PostgreSQL mindestens + Version 9.5 sein.

      - - Der gespeicherte Wert des Readings wird optimiert für eine automatisierte Nachverarbeitung, z.B. yes wird transformiert - nach 1.

      - Die gespeicherten Werte können mittels GET Funktion angezeigt werden: + Der gespeicherte Wert des Readings wird optimiert für eine automatisierte Nachverarbeitung, z.B. yes wird transformiert + nach 1.

      + + Die gespeicherten Werte können mittels GET Funktion angezeigt werden:
        get myDbLog - - 2012-11-10 2012-11-10 KS300:temperature

      - - FileLog-Dateien nach DbLog übertragen

      + + FileLog-Dateien nach DbLog übertragen

      Zur Übertragung von vorhandenen Filelog-Daten in die DbLog-Datenbank steht das spezielle Modul 98_FileLogConvert.pm zur Verfügung.
      - Dieses Modul kann hier - bzw. aus dem Verzeichnis ./contrib geladen werden. - Weitere Informationen und Hilfestellung gibt es im entsprechenden - Forumthread .


      - - Reporting und Management von DbLog-Datenbankinhalten

      - Mit Hilfe SVG können Datenbankinhalte visualisiert werden.
      - Darüber hinaus kann das Modul DbRep genutzt werden um tabellarische - Datenbankauswertungen anzufertigen oder den Datenbankinhalt mit den zur Verfügung stehenden Funktionen zu verwalten. -


      + Dieses Modul kann hier + bzw. aus dem Verzeichnis ./contrib geladen werden. + Weitere Informationen und Hilfestellung gibt es im entsprechenden + Forumthread .


      - Troubleshooting

      + Reporting und Management von DbLog-Datenbankinhalten

      + Mit Hilfe SVG können Datenbankinhalte visualisiert werden.
      + Darüber hinaus kann das Modul DbRep genutzt werden um tabellarische + Datenbankauswertungen anzufertigen oder den Datenbankinhalt mit den zur Verfügung stehenden Funktionen zu verwalten. +


      + + Troubleshooting

      Wenn nach der erfolgreichen Definition das DbLog-Device nicht wie erwartet arbeitet, können folgende Hinweise hilfreich sein:

      @@ -7684,8 +7771,8 @@ attr SMA_Energymeter DbLogValueFn Zur Problemanalyse bitte die Ausgabe von "list <name>", das Ergebnis von "set <name> configCheck" und die Ausgaben des DbLog-Device im Logfile im Forumthread posten. -

      - +

      +


    @@ -7698,10 +7785,10 @@ attr SMA_Energymeter DbLogValueFn
      Im asynchronen Modus wird ein neuer Datensatz in den Cache eingefügt und beim nächsten Synclauf mit abgearbeitet.

      - Beispiel:
      - set <name> addCacheLine 2017-12-05 17:03:59|MaxBathRoom|MAX|valveposition: 95|valveposition|95|%
      + Beispiel:
      + set <name> addCacheLine 2017-12-05 17:03:59|MaxBathRoom|MAX|valveposition: 95|valveposition|95|%

    - + set <name> addLog <devspec>:<Reading> [Value] [CN=<caller name>] [!useExcludes]

      Fügt einen zusätzlichen Logeintrag einer Device/Reading-Kombination in die Datenbank ein. Die eventuell im Attribut "DbLogExclude" spezifizierten Readings (im Quelldevice) werden nicht geloggt, es sei denn sie sind im Attribut @@ -7727,14 +7814,14 @@ attr SMA_Energymeter DbLogValueFn
      Das Datenbankfeld "EVENT" wird automatisch mit "addLog" belegt.
      - Es wird KEIN zusätzlicher Event im System erzeugt !

      + Es wird KEIN zusätzlicher Event im System erzeugt !

      - Beispiele:
      - set <name> addLog SMA_Energymeter:Bezug_Wirkleistung
      - set <name> addLog TYPE=SSCam:state
      - set <name> addLog MyWetter:(fc10.*|fc8.*)
      - set <name> addLog MyWetter:(wind|wind_ch.*) 20 !useExcludes
      - set <name> addLog TYPE=CUL_HM:FILTER=model=HM-CC-RT-DN:FILTER=subType!=(virtual|):(measured-temp|desired-temp|actuator)

      + Beispiele:
      + set <name> addLog SMA_Energymeter:Bezug_Wirkleistung
      + set <name> addLog TYPE=SSCam:state
      + set <name> addLog MyWetter:(fc10.*|fc8.*)
      + set <name> addLog MyWetter:(wind|wind_ch.*) 20 !useExcludes
      + set <name> addLog TYPE=CUL_HM:FILTER=model=HM-CC-RT-DN:FILTER=subType!=(virtual|):(measured-temp|desired-temp|actuator)

      set <name> addLog USV:state CN=di.cronjob
      In der valueFn-Funktion wird der Aufrufer "di.cronjob" über die Variable $CN ausgewertet und davon abhängig der @@ -7742,28 +7829,28 @@ attr SMA_Energymeter DbLogValueFn valueFn = if($CN eq "di.cronjob" and $TIMESTAMP =~ m/\s00:00:[\d:]+/) { $TIMESTAMP =~ s/\s([^\s]+)/ 23:59:59/ }

    - + set <name> clearReadings

      Leert Readings die von verschiedenen DbLog-Funktionen angelegt wurden.

    - + set <name> eraseReadings

      Löscht alle Readings außer dem Reading "state".

    - + set <name> commitCache

      Im asynchronen Modus (Attribut asyncMode=1), werden die im Speicher gecachten Daten in die Datenbank geschrieben - und danach der Cache geleert. Der interne Timer des asynchronen Modus wird dabei neu gesetzt. + und danach der Cache geleert. Der interne Timer des asynchronen Modus wird dabei neu gesetzt. Der Befehl kann nützlich sein um manuell oder z.B. über ein AT den Cacheinhalt zu einem definierten Zeitpunkt in die - Datenbank zu schreiben.

    + Datenbank zu schreiben.
    set <name> configCheck

      Es werden einige wichtige Einstellungen geprüft und Empfehlungen gegeben falls potentielle Verbesserungen - identifiziert wurden. -

    + identifiziert wurden. +
    set <name> count

      Zählt die Datensätze in den Tabellen current und history und schreibt die Ergebnisse in die Readings - countCurrent und countHistory.

    - + countCurrent und countHistory.
    + set <name> countNbl

      Die non-blocking Ausführung von "set <name> count". @@ -7776,7 +7863,7 @@ attr SMA_Energymeter DbLogValueFn set <name> deleteOldDays <n>

        Löscht Datensätze in Tabelle history, die älter sind als <n> Tage sind. - Die Anzahl der gelöschten Datensätze wird in das Reading lastRowsDeleted geschrieben.

      + Die Anzahl der gelöschten Datensätze wird in das Reading lastRowsDeleted geschrieben.

    set <name> deleteOldDaysNbl <n>

      @@ -7786,57 +7873,57 @@ attr SMA_Energymeter DbLogValueFn Hinweis:
      Obwohl die Funktion selbst non-blocking ist, muß das DbLog-Device im asynchronen Modus betrieben werden (asyncMode = 1) um FHEM nicht zu blockieren ! -

    +
    - + set <name> exportCache [nopurge | purgecache]

      Wenn DbLog im asynchronen Modus betrieben wird, kann der Cache mit diesem Befehl in ein Textfile geschrieben - werden. Das File wird per Default in dem Verzeichnis (global->modpath)/log/ erstellt. Das Zielverzeichnis kann mit - dem Attribut "expimpdir" geändert werden.
      - Der Name des Files wird automatisch generiert und enthält den Präfix "cache_", gefolgt von dem DbLog-Devicenamen und - dem aktuellen Zeitstempel, z.B. "cache_LogDB_2017-03-23_22-13-55".
      + werden. Das File wird per Default in dem Verzeichnis (global->modpath)/log/ erstellt. Das Zielverzeichnis kann mit + dem Attribut "expimpdir" geändert werden.
      + Der Name des Files wird automatisch generiert und enthält den Präfix "cache_", gefolgt von dem DbLog-Devicenamen und + dem aktuellen Zeitstempel, z.B. "cache_LogDB_2017-03-23_22-13-55".
      Mit den Optionen "nopurge" bzw. "purgecache" wird festgelegt, ob der Cacheinhalt nach dem Export gelöscht werden soll oder nicht. Mit "nopurge" (default) bleibt der Cacheinhalt erhalten.
      Das Attribut "exportCacheAppend" bestimmt dabei, ob mit jedem Exportvorgang ein neues Exportfile angelegt wird (default) oder der Cacheinhalt an das bestehende (neueste) Exportfile angehängt wird.

    - + set <name> importCachefile <file>

      Importiert ein mit "exportCache" geschriebenes File in die Datenbank. - Die verfügbaren Dateien werden per Default im Verzeichnis (global->modpath)/log/ gesucht und eine Drop-Down Liste - erzeugt sofern Dateien gefunden werden. Das Quellverzeichnis kann mit dem Attribut expimpdir geändert werden.
      - Es werden nur die Dateien angezeigt, die dem Muster "cache_", gefolgt von dem DbLog-Devicenamen entsprechen.
      - Zum Beispiel "cache_LogDB_2017-03-23_22-13-55", falls das Log-Device "LogDB" heißt.
      + Die verfügbaren Dateien werden per Default im Verzeichnis (global->modpath)/log/ gesucht und eine Drop-Down Liste + erzeugt sofern Dateien gefunden werden. Das Quellverzeichnis kann mit dem Attribut expimpdir geändert werden.
      + Es werden nur die Dateien angezeigt, die dem Muster "cache_", gefolgt von dem DbLog-Devicenamen entsprechen.
      + Zum Beispiel "cache_LogDB_2017-03-23_22-13-55", falls das Log-Device "LogDB" heißt.
      Nach einem erfolgreichen Import wird das File mit dem Präfix "impdone_" versehen und erscheint dann nicht mehr - in der Drop-Down Liste. Soll ein Cachefile in eine andere als der Quelldatenbank importiert werden, kann das + in der Drop-Down Liste. Soll ein Cachefile in eine andere als der Quelldatenbank importiert werden, kann das DbLog-Device im Filenamen angepasst werden damit dieses File den Suchktiterien entspricht und in der Drop-Down Liste erscheint.

    - + set <name> listCache

      Wenn DbLog im asynchronen Modus betrieben wird (Attribut asyncMode=1), können mit diesem Befehl die im Speicher gecachten Events - angezeigt werden.

    + angezeigt werden.
    set <name> purgeCache

      Im asynchronen Modus (Attribut asyncMode=1), werden die im Speicher gecachten Daten gelöscht. Es werden keine Daten aus dem Cache in die Datenbank geschrieben.

    - + set <name> reduceLog <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

      Reduziert historische Datensätze, die älter sind als <no> Tage und (optional) neuer sind als <nn> Tage auf einen Eintrag (den ersten) pro Stunde je Device & Reading.
      Innerhalb von device/reading können SQL-Wildcards "%" und "_" verwendet werden.

      - - Das Reading "reduceLogState" zeigt den Ausführungsstatus des letzten reduceLog-Befehls.

      + + Das Reading "reduceLogState" zeigt den Ausführungsstatus des letzten reduceLog-Befehls.

      Durch die optionale Angabe von 'average' wird nicht nur die Datenbank bereinigt, sondern alle numerischen Werte - einer Stunde werden auf einen einzigen Mittelwert reduziert.
      + einer Stunde werden auf einen einzigen Mittelwert reduziert.
      Durch die optionale Angabe von 'average=day' wird nicht nur die Datenbank bereinigt, sondern alle numerischen - Werte eines Tages auf einen einzigen Mittelwert reduziert. (impliziert 'average')

      + Werte eines Tages auf einen einzigen Mittelwert reduziert. (impliziert 'average')

      Optional kann als letzer Parameter "exclude=device1:reading1,device2:reading2,...." - angegeben werden um device/reading Kombinationen von reduceLog auszuschließen.

      + angegeben werden um device/reading Kombinationen von reduceLog auszuschließen.

      Optional kann als letzer Parameter "include=device:reading" angegeben werden um - die auf die Datenbank ausgeführte SELECT-Abfrage einzugrenzen, was die RAM-Belastung verringert und die - Performance erhöht.

      + die auf die Datenbank ausgeführte SELECT-Abfrage einzugrenzen, was die RAM-Belastung verringert und die + Performance erhöht.

        Beispiel:
        @@ -7846,30 +7933,30 @@ attr SMA_Energymeter DbLogValueFn
        ACHTUNG: Es wird dringend empfohlen zu überprüfen ob der standard INDEX 'Search_Idx' in der Tabelle 'history' existiert!
        - Die Abarbeitung dieses Befehls dauert unter Umständen (ohne INDEX) extrem lange. FHEM wird durch den Befehl bis - zur Fertigstellung komplett blockiert !

        + Die Abarbeitung dieses Befehls dauert unter Umständen (ohne INDEX) extrem lange. FHEM wird durch den Befehl bis + zur Fertigstellung komplett blockiert !


      - + set <name> reduceLogNbl <no>[:<nn>] [average[=day]] [exclude=device1:reading1,device2:reading2,...]

      -
        - Führt die gleiche Funktion wie "set <name> reduceLog" aus. Im Gegensatz zu reduceLog wird mit FHEM wird durch den Befehl reduceLogNbl nicht - mehr blockiert da diese Funktion non-blocking implementiert ist ! +
          + Führt die gleiche Funktion wie "set <name> reduceLog" aus. Im Gegensatz zu reduceLog wird mit FHEM wird durch den Befehl reduceLogNbl nicht + mehr blockiert da diese Funktion non-blocking implementiert ist !

          Hinweis:
          Obwohl die Funktion selbst non-blocking ist, muß das DbLog-Device im asynchronen Modus betrieben werden (asyncMode = 1) um FHEM nicht zu blockieren !

        - + set <name> reopen [n]

          Schließt die Datenbank und öffnet sie danach sofort wieder wenn keine Zeit [n] in Sekunden angegeben wurde. - Dabei wird die Journaldatei geleert und neu angelegt.
          + Dabei wird die Journaldatei geleert und neu angelegt.
          Verbessert den Datendurchsatz und vermeidet Speicherplatzprobleme.
          - Wurde eine optionale Verzögerungszeit [n] in Sekunden angegeben, wird die Verbindung zur Datenbank geschlossen und erst - nach Ablauf von [n] Sekunden wieder neu verbunden. - Im synchronen Modus werden die Events in dieser Zeit nicht gespeichert. - Im asynchronen Modus werden die Events im Cache gespeichert und nach dem Reconnect in die Datenbank geschrieben.

        + Wurde eine optionale Verzögerungszeit [n] in Sekunden angegeben, wird die Verbindung zur Datenbank geschlossen und erst + nach Ablauf von [n] Sekunden wieder neu verbunden. + Im synchronen Modus werden die Events in dieser Zeit nicht gespeichert. + Im asynchronen Modus werden die Events im Cache gespeichert und nach dem Reconnect in die Datenbank geschrieben.

      set <name> rereadcfg

        Schließt die Datenbank und öffnet sie danach sofort wieder. Dabei wird die Journaldatei geleert und neu angelegt.
        @@ -7879,10 +7966,10 @@ attr SMA_Energymeter DbLogValueFn set <name> userCommand <validSqlStatement>

          Führt einfache sql select Befehle auf der Datenbank aus. Der Befehl und ein zurückgeliefertes - Ergebnis wird in das Reading "userCommand" bzw. "userCommandResult" geschrieben. Das Ergebnis kann nur - einzeilig sein. - Die Ausführung von SQL-Befehlen in DbLog sind deprecated. Dafür sollte das Auswertungsmodul - DbRep genutzt werden.
          + Ergebnis wird in das Reading "userCommand" bzw. "userCommandResult" geschrieben. Das Ergebnis kann nur + einzeilig sein. + Die Ausführung von SQL-Befehlen in DbLog sind deprecated. Dafür sollte das Auswertungsmodul + DbRep genutzt werden.


      @@ -7916,7 +8003,7 @@ attr SMA_Energymeter DbLogValueFn
    • -: identisch wie "history"
    - +
  • <out>
    Ein Parameter um eine Kompatibilität zum Filelog herzustellen. Dieser Parameter ist per default immer auf - zu setzen um die @@ -7929,12 +8016,12 @@ attr SMA_Energymeter DbLogValueFn
  • -: default
  • - +
  • <from> / <to>
    Wird benutzt um den Zeitraum der Daten einzugrenzen. Es ist das folgende Zeitformat oder ein Teilstring davon zu benutzen:
      YYYY-MM-DD_HH24:MI:SS
  • - +
  • <column_spec>
    Für jede column_spec Gruppe wird ein Datenset zurückgegeben welches durch einen Kommentar getrennt wird. Dieser Kommentar repräsentiert @@ -7977,13 +8064,13 @@ attr SMA_Energymeter DbLogValueFn
  • <regexp>
    Diese Zeichenkette wird als Perl Befehl ausgewertet. - Die regexp wird vor dem angegebenen <fn> Parameter ausgeführt. + Die regexp wird vor dem angegebenen <fn> Parameter ausgeführt.
    Bitte zur Beachtung: Diese Zeichenkette darf keine Leerzeichen enthalten da diese sonst als <column_spec> Trennung interpretiert werden und alles nach dem Leerzeichen als neue <column_spec> gesehen wird.
    - + Schlüsselwörter
  • $val ist der aktuelle Wert, den die Datenbank für ein Device/Reading ausgibt.
  • $ts ist der aktuelle Timestamp des Logeintrages.
  • @@ -7993,32 +8080,32 @@ attr SMA_Energymeter DbLogValueFn nicht für eine Folgeberechnung verwendet. - +

    Beispiele:
    • get myDbLog - - 2012-11-10 2012-11-20 KS300:temperature
    • -
    • get myDbLog current ALL - - %:temperature

    • +
    • get myDbLog current ALL - - %:temperature

    • Damit erhält man alle aktuellen Readings "temperature" von allen in der DB geloggten Devices. Achtung: bei Nutzung von Jokerzeichen auf die history-Tabelle kann man sein FHEM aufgrund langer Laufzeit lahmlegen! -
    • get myDbLog - - 2012-11-10_10 2012-11-10_20 KS300:temperature::int1
      +
    • get myDbLog - - 2012-11-10_10 2012-11-10_20 KS300:temperature::int1
      gibt Daten aus von 10Uhr bis 20Uhr am 10.11.2012
    • -
    • get myDbLog - all 2012-11-10 2012-11-20 KS300:temperature
    • +
    • get myDbLog - all 2012-11-10 2012-11-20 KS300:temperature
    • -
    • get myDbLog - - 2012-11-10 2012-11-20 KS300:temperature KS300:rain::delta-h KS300:rain::delta-d
    • +
    • get myDbLog - - 2012-11-10 2012-11-20 KS300:temperature KS300:rain::delta-h KS300:rain::delta-d
    • -
    • get myDbLog - - 2012-11-10 2012-11-20 MyFS20:data:::$val=~s/(on|off).*/$1eq"on"?1:0/eg
      +
    • get myDbLog - - 2012-11-10 2012-11-20 MyFS20:data:::$val=~s/(on|off).*/$1eq"on"?1:0/eg
      gibt 1 zurück für alle Ausprägungen von on* (on|on-for-timer etc) und 0 für alle off*
    • -
    • get myDbLog - - 2012-11-10 2012-11-20 Bodenfeuchte:data:::$val=~s/.*B:\s([-\.\d]+).*/$1/eg
      +
    • get myDbLog - - 2012-11-10 2012-11-20 Bodenfeuchte:data:::$val=~s/.*B:\s([-\.\d]+).*/$1/eg
      Beispiel von OWAD: Ein Wert wie z.B.: "A: 49.527 % B: 66.647 % C: 9.797 % D: 0.097 V"
      und die Ausgabe ist für das Reading B folgende: 2012-11-20_10:23:54 66.647
    • -
    • get DbLog - - 2013-05-26 2013-05-28 Pumpe:data::delta-ts:$val=~s/on/hide/
      +
    • get DbLog - - 2013-05-26 2013-05-28 Pumpe:data::delta-ts:$val=~s/on/hide/
      Realisierung eines Betriebsstundenzählers. Durch delta-ts wird die Zeit in Sek zwischen den Log- Einträgen ermittelt. Die Zeiten werden bei den on-Meldungen nicht ausgegeben welche einer Abschaltzeit entsprechen würden.
    • @@ -8038,24 +8125,24 @@ attr SMA_Energymeter DbLogValueFn
    • <name>
      Der Name des definierten DbLogs, so wie er in der fhem.cfg angegeben wurde.
    • -
    • <in>
      +
    • <in>
      Ein Dummy Parameter um eine Kompatibilität zum Filelog herzustellen. Dieser Parameter ist immer auf - zu setzen.
    • -
    • <out>
      +
    • <out>
      Ein Dummy Parameter um eine Kompatibilität zum Filelog herzustellen. Dieser Parameter ist auf webchart zu setzen um die Charting Get Funktion zu nutzen.
    • -
    • <from> / <to>
      +
    • <from> / <to>
      Wird benutzt um den Zeitraum der Daten einzugrenzen. Es ist das folgende Zeitformat zu benutzen:
        YYYY-MM-DD_HH24:MI:SS
    • -
    • <device>
      +
    • <device>
      Ein String, der das abzufragende Device darstellt.
    • -
    • <querytype>
      +
    • <querytype>
      Ein String, der die zu verwendende Abfragemethode darstellt. Zur Zeit unterstützte Werte sind:
      getreadings um für ein bestimmtes device alle Readings zu erhalten
      getdevices um alle verfügbaren devices zu erhalten
      @@ -8071,22 +8158,22 @@ attr SMA_Energymeter DbLogValueFn yearstats um Statistiken für einen Wert (yaxis) für ein Jahr abzufragen.
    • -
    • <xaxis>
      +
    • <xaxis>
      Ein String, der die X-Achse repräsentiert
    • -
    • <yaxis>
      +
    • <yaxis>
      Ein String, der die Y-Achse repräsentiert
    • -
    • <savename>
      +
    • <savename>
      Ein String, unter dem ein Chart in der Datenbank gespeichert werden soll
    • -
    • <chartconfig>
      +
    • <chartconfig>
      Ein jsonstring der den zu speichernden Chart repräsentiert
    • -
    • <pagingstart>
      +
    • <pagingstart>
      Ein Integer um den Startwert für die Abfrage 'getTableData' festzulegen
    • -
    • <paginglimit>
      +
    • <paginglimit>
      Ein Integer um den Limitwert für die Abfrage 'getTableData' festzulegen


    @@ -8096,20 +8183,20 @@ attr SMA_Energymeter DbLogValueFn
  • get logdb - webchart "" "" "" getcharts
    Liefert alle gespeicherten Charts aus der Datenbank
  • -
  • get logdb - webchart "" "" "" getdevices
    +
  • get logdb - webchart "" "" "" getdevices
    Liefert alle verfügbaren Devices aus der Datenbank
  • -
  • get logdb - webchart "" "" ESA2000_LED_011e getreadings
    +
  • get logdb - webchart "" "" ESA2000_LED_011e getreadings
    Liefert alle verfügbaren Readings aus der Datenbank unter Angabe eines Gerätes
  • -
  • get logdb - webchart 2013-02-11_00:00:00 2013-02-12_00:00:00 ESA2000_LED_011e timerange TIMESTAMP day_kwh
    +
  • get logdb - webchart 2013-02-11_00:00:00 2013-02-12_00:00:00 ESA2000_LED_011e timerange TIMESTAMP day_kwh
    Liefert Chart-Daten, die auf folgenden Parametern basieren: 'xaxis', 'yaxis', 'device', 'to' und 'from'
    Die Ausgabe erfolgt als JSON, z.B.: [{'TIMESTAMP':'2013-02-11 00:10:10','VALUE':'0.22431388090756'},{'TIMESTAMP'.....}]
  • -
  • get logdb - webchart 2013-02-11_00:00:00 2013-02-12_00:00:00 ESA2000_LED_011e savechart TIMESTAMP day_kwh tageskwh
    +
  • get logdb - webchart 2013-02-11_00:00:00 2013-02-12_00:00:00 ESA2000_LED_011e savechart TIMESTAMP day_kwh tageskwh
    Speichert einen Chart unter Angabe eines 'savename' und seiner zugehörigen Konfiguration
  • -
  • get logdb - webchart "" "" "" deletechart "" "" 7
    +
  • get logdb - webchart "" "" "" deletechart "" "" 7
    Löscht einen zuvor gespeicherten Chart unter Angabe einer id


  • @@ -8123,15 +8210,15 @@ attr SMA_Energymeter DbLogValueFn
  • addStateEvent
      - attr <device> addStateEvent [0|1] -
      + attr <device> addStateEvent [0|1] +
      Bekanntlich wird normalerweise bei einem Event mit dem Reading "state" der state-String entfernt, d.h. der Event ist nicht zum Beispiel "state: on" sondern nur "on".
      - Meistens ist es aber hilfreich in DbLog den kompletten Event verarbeiten zu können. Deswegen übernimmt DbLog per Default + Meistens ist es aber hilfreich in DbLog den kompletten Event verarbeiten zu können. Deswegen übernimmt DbLog per Default den Event inklusive dem Reading-String "state".
      In einigen Fällen, z.B. alten oder speziellen Modulen, ist es allerdings wünschenswert den state-String wie gewöhnlich - zu entfernen. In diesen Fällen bitte addStateEvent = "0" setzen. - Versuchen sie bitte diese Einstellung, falls es mit dem Standard Probleme geben sollte. + zu entfernen. In diesen Fällen bitte addStateEvent = "0" setzen. + Versuchen sie bitte diese Einstellung, falls es mit dem Standard Probleme geben sollte.
  • @@ -8142,18 +8229,18 @@ attr SMA_Energymeter DbLogValueFn
  • asyncMode
      - attr <device> asyncMode [1|0] -
      - + attr <device> asyncMode [1|0] +
      + Dieses Attribut stellt den Arbeitsmodus von DbLog ein. Im asynchronen Modus (asyncMode=1), werden die zu speichernden Events zunächst in Speicher - gecacht. Nach Ablauf der Synchronisationszeit (Attribut syncInterval) oder bei Erreichen der maximalen Anzahl der Datensätze im Cache - (Attribut cacheLimit) werden die gecachten Events im Block in die Datenbank geschrieben. - Ist die Datenbank nicht verfügbar, werden die Events weiterhin im Speicher gehalten und nach Ablauf des Syncintervalls in die Datenbank - geschrieben falls sie dann verfügbar ist.
      - Im asynchronen Mode werden die Daten nicht blockierend mit einem separaten Hintergrundprozess in die Datenbank geschrieben. - Det Timeout-Wert für diesen Hintergrundprozess kann mit dem Attribut "timeout" (Default 86400s) eingestellt werden. - Im synchronen Modus (Normalmodus) werden die Events nicht gecacht und sofort in die Datenbank geschrieben. Ist die Datenbank nicht - verfügbar, gehen sie verloren.
      + gecacht. Nach Ablauf der Synchronisationszeit (Attribut syncInterval) oder bei Erreichen der maximalen Anzahl der Datensätze im Cache + (Attribut cacheLimit) werden die gecachten Events im Block in die Datenbank geschrieben. + Ist die Datenbank nicht verfügbar, werden die Events weiterhin im Speicher gehalten und nach Ablauf des Syncintervalls in die Datenbank + geschrieben falls sie dann verfügbar ist.
      + Im asynchronen Mode werden die Daten nicht blockierend mit einem separaten Hintergrundprozess in die Datenbank geschrieben. + Det Timeout-Wert für diesen Hintergrundprozess kann mit dem Attribut "timeout" (Default 86400s) eingestellt werden. + Im synchronen Modus (Normalmodus) werden die Events nicht gecacht und sofort in die Datenbank geschrieben. Ist die Datenbank nicht + verfügbar, gehen sie verloren.
  • @@ -8163,9 +8250,9 @@ attr SMA_Energymeter DbLogValueFn
  • bulkInsert
      - attr <device> bulkInsert [1|0] -
      - + attr <device> bulkInsert [1|0] +
      + Schaltet den Insert-Modus zwischen "Array" (default) und "Bulk" um. Der Bulk Modus führt beim Insert von sehr vielen Datensätzen in die history-Tabelle zu einer erheblichen Performancesteigerung vor allem im asynchronen Mode. Um die volle Performancesteigerung zu erhalten, sollte in diesem Fall das Attribut "DbLogType" @@ -8179,21 +8266,21 @@ attr SMA_Energymeter DbLogValueFn
    • commitMode
        - attr <device> commitMode [basic_ta:on | basic_ta:off | ac:on_ta:on | ac:on_ta:off | ac:off_ta:on] -
        - + attr <device> commitMode [basic_ta:on | basic_ta:off | ac:on_ta:on | ac:on_ta:off | ac:off_ta:on] +
        + Ändert die Verwendung der Datenbank Autocommit- und/oder Transaktionsfunktionen. Wird Transaktion "aus" verwendet, werden im asynchronen Modus nicht gespeicherte Datensätze nicht an den Cache zurück gegeben. - Dieses Attribut ist ein advanced feature und sollte nur im konkreten Bedarfs- bzw. Supportfall geändert werden.

        - -
          + Dieses Attribut ist ein advanced feature und sollte nur im konkreten Bedarfs- bzw. Supportfall geändert werden.

          + +
          • basic_ta:on - Autocommit Servereinstellung / Transaktion ein (default)
          • -
          • basic_ta:off - Autocommit Servereinstellung / Transaktion aus
          • -
          • ac:on_ta:on - Autocommit ein / Transaktion ein
          • -
          • ac:on_ta:off - Autocommit ein / Transaktion aus
          • -
          • ac:off_ta:on - Autocommit aus / Transaktion ein (Autocommit "aus" impliziert Transaktion "ein")
          • -
          +
        • basic_ta:off - Autocommit Servereinstellung / Transaktion aus
        • +
        • ac:on_ta:on - Autocommit ein / Transaktion ein
        • +
        • ac:on_ta:off - Autocommit ein / Transaktion aus
        • +
        • ac:off_ta:on - Autocommit aus / Transaktion ein (Autocommit "aus" impliziert Transaktion "ein")
        • +
    @@ -8203,14 +8290,14 @@ attr SMA_Energymeter DbLogValueFn
  • cacheEvents
      - attr <device> cacheEvents [2|1|0] -
      -
        + attr <device> cacheEvents [2|1|0] +
        +
        • cacheEvents=1: es werden Events für das Reading CacheUsage erzeugt wenn ein Event zum Cache hinzugefügt wurde.
        • -
        • cacheEvents=2: es werden Events für das Reading CacheUsage erzeugt wenn im asynchronen Mode der Schreibzyklus in die - Datenbank beginnt. CacheUsage enthält zu diesem Zeitpunkt die Anzahl der in die Datenbank zu schreibenden - Datensätze.

        • -
        +
      • cacheEvents=2: es werden Events für das Reading CacheUsage erzeugt wenn im asynchronen Mode der Schreibzyklus in die + Datenbank beginnt. CacheUsage enthält zu diesem Zeitpunkt die Anzahl der in die Datenbank zu schreibenden + Datensätze.

      • +
  • @@ -8220,10 +8307,10 @@ attr SMA_Energymeter DbLogValueFn
  • cacheLimit
      - - attr <device> cacheLimit <n> -
      - + + attr <device> cacheLimit <n> +
      + Im asynchronen Logmodus wird der Cache in die Datenbank weggeschrieben und geleert wenn die Anzahl <n> Datensätze im Cache erreicht ist (Default: 500). Der Timer des asynchronen Logmodus wird dabei neu auf den Wert des Attributs "syncInterval" gesetzt. Im Fehlerfall wird ein erneuter Schreibversuch frühestens nach syncInterval/2 gestartet.
      @@ -8236,15 +8323,15 @@ attr SMA_Energymeter DbLogValueFn
    • colEvent
        - - attr <device> colEvent <n> -
        - - Die Feldlänge für das DB-Feld EVENT wird userspezifisch angepasst. Mit dem Attribut kann der Default-Wert im Modul - verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colEvent=0 wird das Datenbankfeld - EVENT nicht gefüllt.
        - Hinweis:
        - Mit gesetztem Attribut gelten alle Feldlängenbegrenzungen auch für SQLite DB wie im Internal COLUMNS angezeigt !
        + + attr <device> colEvent <n> +
        + + Die Feldlänge für das DB-Feld EVENT wird userspezifisch angepasst. Mit dem Attribut kann der Default-Wert im Modul + verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colEvent=0 wird das Datenbankfeld + EVENT nicht gefüllt.
        + Hinweis:
        + Mit gesetztem Attribut gelten alle Feldlängenbegrenzungen auch für SQLite DB wie im Internal COLUMNS angezeigt !
    @@ -8254,15 +8341,15 @@ attr SMA_Energymeter DbLogValueFn
  • colReading
      - - attr <device> colReading <n> -
      - - Die Feldlänge für das DB-Feld READING wird userspezifisch angepasst. Mit dem Attribut kann der Default-Wert im Modul - verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colReading=0 wird das Datenbankfeld - READING nicht gefüllt.
      - Hinweis:
      - Mit gesetztem Attribut gelten alle Feldlängenbegrenzungen auch für SQLite DB wie im Internal COLUMNS angezeigt !
      + + attr <device> colReading <n> +
      + + Die Feldlänge für das DB-Feld READING wird userspezifisch angepasst. Mit dem Attribut kann der Default-Wert im Modul + verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colReading=0 wird das Datenbankfeld + READING nicht gefüllt.
      + Hinweis:
      + Mit gesetztem Attribut gelten alle Feldlängenbegrenzungen auch für SQLite DB wie im Internal COLUMNS angezeigt !
  • @@ -8272,15 +8359,15 @@ attr SMA_Energymeter DbLogValueFn
  • colValue
      - - attr <device> colValue <n> -
      - - Die Feldlänge für das DB-Feld VALUE wird userspezifisch angepasst. Mit dem Attribut kann der Default-Wert im Modul - verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colValue=0 wird das Datenbankfeld - VALUE nicht gefüllt.
      - Hinweis:
      - Mit gesetztem Attribut gelten alle Feldlängenbegrenzungen auch für SQLite DB wie im Internal COLUMNS angezeigt !
      + + attr <device> colValue <n> +
      + + Die Feldlänge für das DB-Feld VALUE wird userspezifisch angepasst. Mit dem Attribut kann der Default-Wert im Modul + verändert werden wenn die Feldlänge in der Datenbank manuell geändert wurde. Mit colValue=0 wird das Datenbankfeld + VALUE nicht gefüllt.
      + Hinweis:
      + Mit gesetztem Attribut gelten alle Feldlängenbegrenzungen auch für SQLite DB wie im Internal COLUMNS angezeigt !
  • @@ -8290,36 +8377,36 @@ attr SMA_Energymeter DbLogValueFn
  • DbLogType
      - - attr <device> DbLogType [Current|History|Current/History|SampleFill/History] -
      - + + attr <device> DbLogType [Current|History|Current/History|SampleFill/History] +
      + Dieses Attribut legt fest, welche Tabelle oder Tabellen in der Datenbank genutzt werden sollen. Ist dieses Attribut nicht gesetzt, wird per default die Einstellung history verwendet.

      - - Bedeutung der Einstellungen sind:

      - -
        + + Bedeutung der Einstellungen sind:

        + +
          + Die current-Tabelle wird bei der SVG-Erstellung ausgewertet. - + + DbRep-Device
          "set <DbRep-Name> tableCurrentFillup" mit + einem einstellbaren Extract der history-Tabelle gefüllt werden (advanced Feature).
          Current Events werden nur in die current-Tabelle geloggt. - Die current-Tabelle wird bei der SVG-Erstellung ausgewertet.
          History Events werden nur in die history-Tabelle geloggt. Es wird keine DropDown-Liste mit Vorschlägen bei der SVG-Erstellung erzeugt.
          Current/History Events werden sowohl in die current- als auch in die history-Tabelle geloggt. - Die current-Tabelle wird bei der SVG-Erstellung ausgewertet.
          SampleFill/History Events werden nur in die history-Tabelle geloggt. Die current-Tabelle wird bei der SVG-Erstellung ausgewertet und + Die current-Tabelle wird bei der SVG-Erstellung ausgewertet.
          SampleFill/History Events werden nur in die history-Tabelle geloggt. Die current-Tabelle wird bei der SVG-Erstellung ausgewertet und kann zur Erzeugung einer DropDown-Liste mittels einem - DbRep-Device
          "set <DbRep-Name> tableCurrentFillup" mit - einem einstellbaren Extract der history-Tabelle gefüllt werden (advanced Feature).
          -
        -
        -
        - - Hinweis:
        - Die Current-Tabelle muß genutzt werden um eine Device:Reading-DropDownliste zur Erstellung eines - SVG-Plots zu erhalten.
        +
      +
      +
      + + Hinweis:
      + Die Current-Tabelle muß genutzt werden um eine Device:Reading-DropDownliste zur Erstellung eines + SVG-Plots zu erhalten.
  • @@ -8329,11 +8416,11 @@ attr SMA_Energymeter DbLogValueFn
  • DbLogSelectionMode
      - - attr <device> DbLogSelectionMode [Exclude|Include|Exclude/Include] -
      + + attr <device> DbLogSelectionMode [Exclude|Include|Exclude/Include] +
      - Dieses fuer DbLog-Devices spezifische Attribut beeinflußt, wie die Device-spezifischen Attribute + Dieses fuer DbLog-Devices spezifische Attribut beeinflußt, wie die Device-spezifischen Attribute DbLogExclude und DbLogInclude (s.u.) ausgewertet werden.
      Fehlt dieses Attribut, wird "Exclude" als Default angenommen.
      @@ -8361,9 +8448,9 @@ attr SMA_Energymeter DbLogValueFn attr <device> DbLogInclude regex:MinInterval[:force],[regex:MinInterval[:force]] ...
      - Wird DbLog genutzt, wird in allen Devices das Attribut DbLogInclude propagiert. - DbLogInclude funktioniert im Endeffekt genau wie DbLogExclude, ausser dass Readings mit diesen RegExp - in das Logging eingeschlossen statt ausgeschlossen werden koennen.
      + Wird DbLog genutzt, wird in allen Devices das Attribut DbLogInclude propagiert. + DbLogInclude funktioniert im Endeffekt genau wie DbLogExclude, ausser dass Readings mit diesen RegExp + in das Logging eingeschlossen statt ausgeschlossen werden koennen.
      Ist MinIntervall angegeben, wird der Logeintrag nicht geloggt, wenn das Intervall noch nicht erreicht und der Wert des Readings sich nicht verändert hat. Ist der optionale Parameter "force" hinzugefügt, wird der Logeintrag auch dann nicht geloggt, wenn sich der @@ -8371,7 +8458,7 @@ attr SMA_Energymeter DbLogValueFn Siehe auch die DbLog Attribute defaultMinInterval und DbLogSelectionMode. Es beeinflußt wie DbLogExclude und DbLogInclude ausgewertet werden.

      - Beispiel
      + Beispiel
      attr MyDevice1 DbLogInclude .*
      attr MyDevice2 DbLogInclude state,(floorplantext|MyUserReading):300,battery:3600
      attr MyDevice2 DbLogInclude state,(floorplantext|MyUserReading):300:force,battery:3600:force @@ -8389,8 +8476,8 @@ attr SMA_Energymeter DbLogValueFn
      Wird DbLog genutzt, wird in allen Devices das Attribut DbLogExclude propagiert. - Der Wert des Attributes wird als Regexp ausgewertet und schliesst die damit matchenden Readings von einem Logging aus. - Einzelne Regexp werden durch Komma getrennt.
      + Der Wert des Attributes wird als Regexp ausgewertet und schliesst die damit matchenden Readings von einem Logging aus. + Einzelne Regexp werden durch Komma getrennt.
      Ist MinIntervall angegeben, wird der Logeintrag nicht geloggt, wenn das Intervall noch nicht erreicht und der Wert des Readings sich nicht verändert hat. Ist der optionale Parameter "force" hinzugefügt, wird der Logeintrag auch dann nicht geloggt, wenn sich der @@ -8398,7 +8485,7 @@ attr SMA_Energymeter DbLogValueFn Siehe auch die DbLog Attribute defaultMinInterval und DbLogSelectionMode. Es beeinflußt wie DbLogExclude und DbLogInclude ausgewertet werden.

      - Beispiel
      + Beispiel
      attr MyDevice1 DbLogExclude .*
      attr MyDevice2 DbLogExclude state,(floorplantext|MyUserReading):300,battery:3600
      attr MyDevice2 DbLogExclude state,(floorplantext|MyUserReading):300:force,battery:3600:force @@ -8411,23 +8498,24 @@ attr SMA_Energymeter DbLogValueFn
    • DbLogValueFn
        - - attr <device> DbLogValueFn {} -
        + + attr <device> DbLogValueFn {} +
        Wird DbLog genutzt, wird in allen Devices das Attribut DbLogValueFn propagiert. - Es kann über einen Perl-Ausdruck auf die Variablen $TIMESTAMP, $READING, $VALUE (Wert des Readings) und - $UNIT (Einheit des Readingswert) zugegriffen werden und diese verändern, d.h. die veränderten Werte werden geloggt. - Außerdem hat man lesenden Zugriff auf $DEVICE (den Namen des Devices) und $EVENT für eine Auswertung im Perl-Ausdruck. - $DEVICE und $EVENT kann nicht verändert werden.
        - Soll $TIMESTAMP verändert werden, muss die Form "yyyy-mm-dd hh:mm:ss" eingehalten werden, ansonsten wird der - geänderte $timestamp nicht übernommen. - Zusätzlich kann durch Setzen der Variable "$IGNORE=1" der Datensatz vom Logging ausgeschlossen werden.
        + Es kann über einen Perl-Ausdruck auf die Variablen $TIMESTAMP, $READING, $VALUE (Wert des Readings) und + $UNIT (Einheit des Readingswert) zugegriffen werden und diese verändern, d.h. die veränderten Werte werden geloggt.
        + Außerdem hat man Lesezugriff auf $DEVICE (den Namen des Quellgeräts), $EVENT, $LASTTIMESTAMP und $LASTVALUE + zur Bewertung in Ihrem Ausdruck.
        + Die Variablen $LASTTIMESTAMP und $LASTVALUE enthalten Zeit und Wert des zuletzt protokollierten Datensatzes von $DEVICE / $READING.
        + Soll $TIMESTAMP verändert werden, muss die Form "yyyy-mm-dd hh:mm:ss" eingehalten werden, ansonsten wird der + geänderte $timestamp nicht übernommen. + Zusätzlich kann durch Setzen der Variable "$IGNORE=1" der Datensatz vom Logging ausgeschlossen werden.
        Die devicespezifische Funktion in "DbLogValueFn" wird vor der eventuell im DbLog-Device vorhandenen Funktion im Attribut "valueFn" auf den Datensatz angewendet.

        - - Beispiel
        + + Beispiel
         attr SMA_Energymeter DbLogValueFn
         { 
        @@ -8438,7 +8526,7 @@ attr SMA_Energymeter DbLogValueFn
             $IGNORE=1;
           }
         }
        -	   
        +
    @@ -8450,7 +8538,7 @@ attr SMA_Energymeter DbLogValueFn attr <device> dbSchema <schema>
    - + Dieses Attribut ist setzbar für die Datenbanken MySQL/MariaDB und PostgreSQL. Die Tabellennamen (current/history) werden durch das angegebene Datenbankschema ergänzt. Das Attribut ist ein advanced Feature und normalerweise nicht nötig zu setzen.
    @@ -8469,20 +8557,20 @@ attr SMA_Energymeter DbLogValueFn Mit diesem Attribut wird ein Standard Minimum Intervall für devspec festgelegt. Ist defaultMinInterval angegeben, wird der Logeintrag nicht geloggt, wenn das Intervall noch nicht erreicht und der - Wert des Readings sich nicht verändert hat.
    + Wert des Readings sich nicht verändert hat.
    Ist der optionale Parameter "force" hinzugefügt, wird der Logeintrag auch dann nicht geloggt, wenn sich der Wert des Readings verändert hat.
    Eventuell im Quelldevice angegebene Spezifikationen DbLogExclude / DbLogInclude haben Vorrang und werden durch - defaultMinInterval nicht überschrieben.
    + defaultMinInterval nicht überschrieben.
    Die Eingabe kann mehrzeilig erfolgen.

    - Beispiele
    + Beispiele
    attr dblog defaultMinInterval .*::120::force
    - # Events aller Devices werden nur geloggt, wenn 120 Sekunden zum letzten Logeintrag vergangen sind ist (Reading spezifisch) unabhängig von einer eventuellen Änderung des Wertes.
    + # Events aller Devices werden nur geloggt, wenn 120 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) unabhängig von einer eventuellen Änderung des Wertes.
    attr dblog defaultMinInterval (Weather|SMA)::300
    - # Events der Devices "Weather" und "SMA" werden nur geloggt wenn 300 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) und sich der Wert nicht geändert hat.
    + # Events der Devices "Weather" und "SMA" werden nur geloggt wenn 300 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) und sich der Wert nicht geändert hat.
    attr dblog defaultMinInterval TYPE=CUL_HM::600::force
    - # Events aller Devices des Typs "CUL_HM" werden nur geloggt, wenn 600 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) unabhängig von einer eventuellen Änderung des Wertes. + # Events aller Devices des Typs "CUL_HM" werden nur geloggt, wenn 600 Sekunden zum letzten Logeintrag vergangen sind (Reading spezifisch) unabhängig von einer eventuellen Änderung des Wertes.
  • @@ -8507,31 +8595,31 @@ attr SMA_Energymeter DbLogValueFn
  • excludeDevs
      - - attr <device> excludeDevs <devspec1>[#Reading],<devspec2>[#Reading],<devspec...> -
      + + attr <device> excludeDevs <devspec1>[#Reading],<devspec2>[#Reading],<devspec...> +
      - Die Device/Reading-Kombinationen "devspec1#Reading", "devspec2#Reading" bis "devspec..." werden vom Logging in die + Die Device/Reading-Kombinationen "devspec1#Reading", "devspec2#Reading" bis "devspec..." werden vom Logging in die Datenbank global ausgeschlossen.
      Die Angabe eines auszuschließenden Readings ist optional.
      Somit können Device/Readings explizit bzw. konsequent vom Logging ausgeschlossen werden ohne Berücksichtigung anderer Excludes oder Includes (z.B. im DEF). - Die auszuschließenden Devices können als Geräte-Spezifikation angegeben werden. - Für weitere Details bezüglich devspec siehe Geräte-Spezifikation.

      - - Beispiel
      + Die auszuschließenden Devices können als Geräte-Spezifikation angegeben werden. + Für weitere Details bezüglich devspec siehe Geräte-Spezifikation.

      + + Beispiel
      - attr <device> excludeDevs global,Log.*,Cam.*,TYPE=DbLog -
      - # Es werden die Devices global bzw. Devices beginnend mit "Log" oder "Cam" bzw. Devices vom Typ "DbLog" vom Logging ausgeschlossen.
      + attr <device> excludeDevs global,Log.*,Cam.*,TYPE=DbLog +
      + # Es werden die Devices global bzw. Devices beginnend mit "Log" oder "Cam" bzw. Devices vom Typ "DbLog" vom Logging ausgeschlossen.
      - attr <device> excludeDevs .*#.*Wirkleistung.* -
      - # Es werden alle Device/Reading-Kombinationen mit "Wirkleistung" im Reading vom Logging ausgeschlossen.
      + attr <device> excludeDevs .*#.*Wirkleistung.* +
      + # Es werden alle Device/Reading-Kombinationen mit "Wirkleistung" im Reading vom Logging ausgeschlossen.
      attr <device> excludeDevs SMA_Energymeter#Bezug_WirkP_Zaehler_Diff -
      - # Es wird der Event mit Device "SMA_Energymeter" und Reading "Bezug_WirkP_Zaehler_Diff" vom Logging ausgeschlossen.
      +
      + # Es wird der Event mit Device "SMA_Energymeter" und Reading "Bezug_WirkP_Zaehler_Diff" vom Logging ausgeschlossen.
  • @@ -8542,18 +8630,18 @@ attr SMA_Energymeter DbLogValueFn
  • expimpdir
      - - attr <device> expimpdir <directory> -
      + + attr <device> expimpdir <directory> +
      - In diesem Verzeichnis wird das Cachefile beim Export angelegt bzw. beim Import gesucht. Siehe set-Kommandos - "exportCache" bzw. "importCachefile". Das Default-Verzeichnis ist "(global->modpath)/log/". - Das im Attribut angegebene Verzeichnis muss vorhanden und beschreibbar sein.

      - - Beispiel
      + In diesem Verzeichnis wird das Cachefile beim Export angelegt bzw. beim Import gesucht. Siehe set-Kommandos + "exportCache" bzw. "importCachefile". Das Default-Verzeichnis ist "(global->modpath)/log/". + Das im Attribut angegebene Verzeichnis muss vorhanden und beschreibbar sein.

      + + Beispiel
      - attr <device> expimpdir /opt/fhem/cache/ -
      + attr <device> expimpdir /opt/fhem/cache/ +
  • @@ -8563,10 +8651,10 @@ attr SMA_Energymeter DbLogValueFn
  • exportCacheAppend
      - - attr <device> exportCacheAppend [1|0] -
      - + + attr <device> exportCacheAppend [1|0] +
      + Wenn gesetzt, wird beim Export des Cache ("set <device> exportCache") der Cacheinhalt an das neueste bereits vorhandene Exportfile angehängt. Ist noch kein Exportfile vorhanden, wird es neu angelegt.
      Ist das Attribut nicht gesetzt, wird bei jedem Exportvorgang ein neues Exportfile angelegt. (default)
      @@ -8579,10 +8667,10 @@ attr SMA_Energymeter DbLogValueFn
    • noNotifyDev
        - - attr <device> noNotifyDev [1|0] -
        - + + attr <device> noNotifyDev [1|0] +
        + Erzwingt dass NOTIFYDEV nicht gesetzt und somit nicht verwendet wird.
    • @@ -8593,10 +8681,10 @@ attr SMA_Energymeter DbLogValueFn
    • noSupportPK
        - - attr <device> noSupportPK [1|0] -
        - + + attr <device> noSupportPK [1|0] +
        + Deaktiviert die programmtechnische Unterstützung eines gesetzten Primary Key durch das Modul.
    • @@ -8607,13 +8695,13 @@ attr SMA_Energymeter DbLogValueFn
    • showproctime
        - attr <device> showproctime [1|0] -
        - + attr <device> showproctime [1|0] +
        + Wenn gesetzt, zeigt das Reading "sql_processing_time" die benötigte Abarbeitungszeit (in Sekunden) für die SQL-Ausführung der - durchgeführten Funktion. Dabei wird nicht ein einzelnes SQL-Statement, sondern die Summe aller notwendigen SQL-Abfragen innerhalb der - jeweiligen Funktion betrachtet. Das Reading "background_processing_time" zeigt die im Kindprozess BlockingCall verbrauchte Zeit.
        - + durchgeführten Funktion. Dabei wird nicht ein einzelnes SQL-Statement, sondern die Summe aller notwendigen SQL-Abfragen innerhalb der + jeweiligen Funktion betrachtet. Das Reading "background_processing_time" zeigt die im Kindprozess BlockingCall verbrauchte Zeit.
        +
    @@ -8623,13 +8711,13 @@ attr SMA_Energymeter DbLogValueFn
  • showNotifyTime
      - attr <device> showNotifyTime [1|0] -
      - + attr <device> showNotifyTime [1|0] +
      + Wenn gesetzt, zeigt das Reading "notify_processing_time" die benötigte Abarbeitungszeit (in Sekunden) für die - Abarbeitung der DbLog Notify-Funktion. Das Attribut ist für Performance Analysen geeignet und hilft auch die Unterschiede - im Zeitbedarf bei der Umschaltung des synchronen in den asynchronen Modus festzustellen.
      - + Abarbeitung der DbLog Notify-Funktion. Das Attribut ist für Performance Analysen geeignet und hilft auch die Unterschiede + im Zeitbedarf bei der Umschaltung des synchronen in den asynchronen Modus festzustellen.
      +
  • @@ -8639,9 +8727,9 @@ attr SMA_Energymeter DbLogValueFn
  • syncEvents
      - attr <device> syncEvents [1|0] -
      - + attr <device> syncEvents [1|0] +
      + es werden Events für Reading NextSync erzeugt.
  • @@ -8652,12 +8740,12 @@ attr SMA_Energymeter DbLogValueFn
  • syncInterval
      - attr <device> syncInterval <n> -
      - + attr <device> syncInterval <n> +
      + Wenn DbLog im asynchronen Modus betrieben wird (Attribut asyncMode=1), wird mit diesem Attribut das Intervall in Sekunden zur Speicherung - der im Speicher gecachten Events in die Datenbank eingestellt. Der Defaultwert ist 30 Sekunden.
      - + der im Speicher gecachten Events in die Datenbank eingestellt. Der Defaultwert ist 30 Sekunden.
      +
  • @@ -8667,9 +8755,9 @@ attr SMA_Energymeter DbLogValueFn
  • suppressAddLogV3
      - attr <device> suppressAddLogV3 [1|0] -
      - + attr <device> suppressAddLogV3 [1|0] +
      + Wenn gesetzt, werden verbose 3 Logeinträge durch die addLog-Funktion unterdrückt.
  • @@ -8680,11 +8768,11 @@ attr SMA_Energymeter DbLogValueFn
  • suppressUndef
      - attr <device> suppressUndef -
      + attr <device> suppressUndef +
      Unterdrückt alle undef-Werte, die durch eine Get-Anfrage, z.B. Plot, aus der Datenbank selektiert werden

      - Beispiel
      + Beispiel
      #DbLog eMeter:power:::$val=($val>1500)?undef:$val
  • @@ -8695,9 +8783,9 @@ attr SMA_Energymeter DbLogValueFn
  • timeout
      - - attr <device> timeout <n> -
      + + attr <device> timeout <n> +
      Setzt den Timeout-Wert für den Schreibzyklus in die Datenbank im asynchronen Modus (default 86400s).
  • @@ -8708,13 +8796,13 @@ attr SMA_Energymeter DbLogValueFn
  • traceFlag
      - - attr <device> traceFlag <ALL|SQL|CON|ENC|DBD|TXN> -
      + + attr <device> traceFlag <ALL|SQL|CON|ENC|DBD|TXN> +
      Bestimmt das Tracing von bestimmten Aktivitäten innerhalb des Datenbankinterfaces und Treibers. Das Attribut ist nur für den Fehler- bzw. Supportfall gedacht.

      -
        +
          @@ -8725,8 +8813,8 @@ attr SMA_Energymeter DbLogValueFn
          ALL schaltet alle DBI- und Treiberflags an.
          TXN verfolgt Transaktionen.
          -
        -
        +
      +
  • @@ -8737,12 +8825,12 @@ attr SMA_Energymeter DbLogValueFn
  • traceHandles
      - attr <device> traceHandles <n> -
      - + attr <device> traceHandles <n> +
      + Wenn gesetzt, werden alle <n> Sekunden die systemweit vorhandenen Datenbank-Handles im Logfile ausgegeben. Dieses Attribut ist nur für Supportzwecke relevant. (Default: 0 = ausgeschaltet)
      - +
  • @@ -8752,14 +8840,14 @@ attr SMA_Energymeter DbLogValueFn
  • traceLevel
      - - attr <device> traceLevel <0|1|2|3|4|5|6|7> -
      + + attr <device> traceLevel <0|1|2|3|4|5|6|7> +
      Schaltet die Trace-Funktion des Moduls ein.
      Achtung ! Das Attribut ist nur für den Fehler- bzw. Supportfall gedacht. Es werden sehr viele Einträge in das FHEM Logfile vorgenommen !

      -
        +
          @@ -8772,8 +8860,8 @@ attr SMA_Energymeter DbLogValueFn
          0 Tracing ist disabled. (Default)
          5-7 Wie oben, aber mit mehr und mehr internen Informationen.
          -
        -
        +
      +
  • @@ -8784,12 +8872,12 @@ attr SMA_Energymeter DbLogValueFn
  • useCharfilter
      - - attr <device> useCharfilter [0|1] -
      + + attr <device> useCharfilter [0|1] +
      Wenn gesetzt, werden nur ASCII-Zeichen von 32 bis 126 im Event akzeptiert. (default: 0)
      - Das sind die Zeichen " A-Za-z0-9!"#$%&'()*+,-.\/:;<=>?@[\\]^_`{|}~".
      - Umlaute und "€" werden umgesetzt (z.B. ä nach ae, € nach EUR).
      + Das sind die Zeichen " A-Za-z0-9!"#$%&'()*+,-.\/:;<=>?@[\\]^_`{|}~".
      + Umlaute und "€" werden umgesetzt (z.B. ä nach ae, € nach EUR).
  • @@ -8799,31 +8887,31 @@ attr SMA_Energymeter DbLogValueFn
  • valueFn
      - - attr <device> valueFn {} -
      + + attr <device> valueFn {} +
      - Es kann über einen Perl-Ausdruck auf die Variablen $TIMESTAMP, $DEVICE, $DEVICETYPE, $READING, $VALUE (Wert des Readings) und - $UNIT (Einheit des Readingswert) zugegriffen werden und diese verändern, d.h. die veränderten Werte werden geloggt. - Außerdem hat man lesenden Zugriff auf $EVENT für eine Auswertung im Perl-Ausdruck. - Diese Variable kann aber nicht verändert werden.
      - Soll $TIMESTAMP verändert werden, muss die Form "yyyy-mm-dd hh:mm:ss" eingehalten werden, ansonsten wird der - geänderte $timestamp nicht übernommen. - Zusätzlich kann durch Setzen der Variable "$IGNORE=1" ein Datensatz vom Logging ausgeschlossen werden.

      - - Beispiele
      + Es kann über einen Perl-Ausdruck auf die Variablen $TIMESTAMP, $DEVICE, $DEVICETYPE, $READING, $VALUE (Wert des Readings) und + $UNIT (Einheit des Readingswert) zugegriffen werden und diese verändern, d.h. die veränderten Werte werden geloggt.
      + Außerdem hat man Lesezugriff auf $EVENT, $LASTTIMESTAMP und $LASTVALUE zur Bewertung im Ausdruck.
      + Die Variablen $LASTTIMESTAMP und $LASTVALUE enthalten Zeit und Wert des zuletzt protokollierten Datensatzes von $DEVICE / $READING.
      + Soll $TIMESTAMP verändert werden, muss die Form "yyyy-mm-dd hh:mm:ss" eingehalten werden. Anderenfalls wird der + geänderte $TIMESTAMP nicht übernommen. + Zusätzlich kann durch Setzen der Variable "$IGNORE=1" ein Datensatz vom Logging ausgeschlossen werden.

      + + Beispiele
      - attr <device> valueFn {if ($DEVICE eq "living_Clima" && $VALUE eq "off" ){$VALUE=0;} elsif ($DEVICE eq "e-power"){$VALUE= sprintf "%.1f", $VALUE;}} -
      - # ändert den Reading-Wert des Gerätes "living_Clima" von "off" zu "0" und rundet den Wert vom Gerät "e-power"

      - - attr <device> valueFn {if ($DEVICE eq "SMA_Energymeter" && $READING eq "state"){$IGNORE=1;}} -
      - # der Datensatz wird nicht geloggt wenn Device = "SMA_Energymeter" und das Reading = "state" ist

      - - attr <device> valueFn {if ($DEVICE eq "Dum.Energy" && $READING eq "TotalConsumption"){$UNIT="W";}} -
      - # setzt die Einheit des Devices "Dum.Energy" auf "W" wenn das Reading = "TotalConsumption" ist

      + attr <device> valueFn {if ($DEVICE eq "living_Clima" && $VALUE eq "off" ){$VALUE=0;} elsif ($DEVICE eq "e-power"){$VALUE= sprintf "%.1f", $VALUE;}} +
      + # ändert den Reading-Wert des Gerätes "living_Clima" von "off" zu "0" und rundet den Wert vom Gerät "e-power"

      + + attr <device> valueFn {if ($DEVICE eq "SMA_Energymeter" && $READING eq "state"){$IGNORE=1;}} +
      + # der Datensatz wird nicht geloggt wenn Device = "SMA_Energymeter" und das Reading = "state" ist

      + + attr <device> valueFn {if ($DEVICE eq "Dum.Energy" && $READING eq "TotalConsumption"){$UNIT="W";}} +
      + # setzt die Einheit des Devices "Dum.Energy" auf "W" wenn das Reading = "TotalConsumption" ist

  • @@ -8833,20 +8921,20 @@ attr SMA_Energymeter DbLogValueFn
  • verbose4Devs
      - - attr <device> verbose4Devs <device1>,<device2>,<device..> -
      + + attr <device> verbose4Devs <device1>,<device2>,<device..> +
      - Mit verbose Level 4 werden nur Ausgaben bezüglich der in diesem Attribut aufgeführten Devices im Logfile protokolliert. Ohne dieses + Mit verbose Level 4 werden nur Ausgaben bezüglich der in diesem Attribut aufgeführten Devices im Logfile protokolliert. Ohne dieses Attribut werden mit verbose 4 Ausgaben aller relevanten Devices im Logfile protokolliert. - Die angegebenen Devices werden als Regex ausgewertet.

      - - Beispiel
      + Die angegebenen Devices werden als Regex ausgewertet.

      + + Beispiel
      - attr <device> verbose4Devs sys.*,.*5000.*,Cam.*,global -
      - # Es werden Devices beginnend mit "sys", "Cam" bzw. Devices die "5000" enthalten und das Device "global" protokolliert falls verbose=4 - eingestellt ist.
      + attr <device> verbose4Devs sys.*,.*5000.*,Cam.*,global +
      + # Es werden Devices beginnend mit "sys", "Cam" bzw. Devices die "5000" enthalten und das Device "global" protokolliert falls verbose=4 + eingestellt ist.