From 84cf72774b836a9bba99a1a409df4ace60f8bde4 Mon Sep 17 00:00:00 2001 From: nasseeder1 Date: Wed, 11 Jan 2023 18:55:22 +0000 Subject: [PATCH] 93_DbRep: write TYPE uppercase with writeToDB option git-svn-id: https://svn.fhem.de/fhem/trunk@27026 2b470e98-0d58-463d-a4d8-8e2adae1ed80 --- fhem/CHANGED | 2 + fhem/FHEM/93_DbRep.pm | 685 +++++++++++++++++++++--------------------- 2 files changed, 352 insertions(+), 335 deletions(-) diff --git a/fhem/CHANGED b/fhem/CHANGED index b8d4fe2be..6e9973ae2 100644 --- a/fhem/CHANGED +++ b/fhem/CHANGED @@ -1,5 +1,7 @@ # Add changes at the top of the list. Keep it in ASCII, and 80-char wide. # Do not insert empty lines here, update check depends on it. + - change: 93_DbRep: write TYPE uppercase with writeToDB option, + Commandref edited - bugfix: 59_Weather: fix DarkSky API wrong funtion call - feature: 72_FRITZBOX: merged old 72_FRITZBOX with last Fork Version - bugfix: 59_Weather: bugfix function WeatherAsHtmlD not working diff --git a/fhem/FHEM/93_DbRep.pm b/fhem/FHEM/93_DbRep.pm index 9baad3f6d..b43508798 100644 --- a/fhem/FHEM/93_DbRep.pm +++ b/fhem/FHEM/93_DbRep.pm @@ -59,6 +59,7 @@ no if $] >= 5.017011, warnings => 'experimental::smartmatch'; # Version History intern my %DbRep_vNotesIntern = ( + "8.51.1" => "01.01.2023 write TYPE uppercase with writeToDB option, Commandref edited ", "8.51.0" => "02.01.2023 online formatting of sqlCmd, sqlCmdHistory, sqlSpecial, Commandref edited, get dbValue removed ". "sqlCmdBlocking customized like sqlCmd, bugfix avgTimeWeightMean ", "8.50.10" => "01.01.2023 Commandref edited ", @@ -12713,8 +12714,11 @@ sub DbRep_OutputWriteToDB { if(!$dbloghash->{HELPER}{COLSET}) { $err = "No result of \"$hash->{LASTCMD}\" to database written. 
Cause: column width in \"$hash->{DEF}\" isn't set"; + Log3 ($name, 2, "DbRep $name - ERROR - $err"); + $err = encode_base64($err,""); + return ($err,$wrt,$irowdone); } @@ -12723,7 +12727,7 @@ sub DbRep_OutputWriteToDB { my $aggr = (DbRep_checktimeaggr($hash))[2]; $reading = $optxt."_".$aggr."_".AttrVal($name, "readingNameMap", $reading); - $type = $defs{$device}{TYPE} if($defs{$device}); # $type vom Device ableiten + $type = uc($defs{$device}{TYPE}) if($defs{$device}); # $type vom Device übernehmen if($optxt =~ /avg|sum/) { my @arr = split("\\|", $wrstr); @@ -12770,7 +12774,6 @@ sub DbRep_OutputWriteToDB { } if (defined $value) { - # Daten auf maximale Länge beschneiden (DbLog-Funktion !) ($device,$type,$event,$reading,$value,$unit) = DbLog_cutCol($dbloghash,$device,$type,$event,$reading,$value,$unit); if($i == 0) { @@ -12801,9 +12804,9 @@ sub DbRep_OutputWriteToDB { my %rh = split("§", $wrstr); for my $key (sort(keys(%rh))) { - my @k = split("\\|",$rh{$key}); - $value = defined($k[1])?sprintf("%.${ndp}f",$k[1]):undef; - $rsf = $k[2]; # Datum / Zeit für DB-Speicherung + my @k = split("\\|",$rh{$key}); + $value = defined($k[1])?sprintf("%.${ndp}f",$k[1]):undef; + $rsf = $k[2]; # Datum / Zeit für DB-Speicherung ($date,$time) = split("_",$rsf); $time =~ s/-/:/g if($time); @@ -12820,7 +12823,6 @@ sub DbRep_OutputWriteToDB { } } if ($value) { - # Daten auf maximale Länge beschneiden (DbLog-Funktion !) 
($device,$type,$event,$reading,$value,$unit) = DbLog_cutCol($dbloghash,$device,$type,$event,$reading,$value,$unit); push(@row_array, "$date $time|$device|$type|$event|$reading|$value|$unit"); } @@ -12831,8 +12833,11 @@ sub DbRep_OutputWriteToDB { eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoCommit => 1, mysql_enable_utf8 => $utf8 });}; if ($@) { $err = $@; + Log3 ($name, 2, "DbRep $name - ERROR - $@"); + $err = encode_base64($err,""); + return ($err,$wrt,$irowdone); } @@ -12894,8 +12899,7 @@ sub DbRep_OutputWriteToDB { Log3($name, 2, "DbRep $name - ERROR - $@"); } - # SQL-Startzeit - my $wst = [gettimeofday]; + my $wst = [gettimeofday]; # SQL-Startzeit my $ihs = 0; my $uhs = 0; @@ -12910,8 +12914,7 @@ sub DbRep_OutputWriteToDB { $value = $a[5]; $unit = $a[6]; - eval { - # update oder insert history + eval { # update oder insert history if (lc($DbLogType) =~ m(history) ) { my $rv_uh = $dbh->do("UPDATE history SET TIMESTAMP=\"$timestamp\", DEVICE=\"$device\", READING=\"$reading\", TYPE=\"$type\", EVENT=\"$event\", VALUE=\"$value\", UNIT=\"$unit\" WHERE TIMESTAMP=\"$timestamp\" AND DEVICE=\"$device\" AND READING=\"$reading\""); $uhs += $rv_uh if($rv_uh); @@ -12924,8 +12927,8 @@ sub DbRep_OutputWriteToDB { Log3 $hash->{NAME}, 4, "DbRep $name - INSERT history: $row, RESULT: $rv_ih"; } } - # update oder insert current - if (lc($DbLogType) =~ m(current) ) { + + if (lc($DbLogType) =~ m(current) ) { # update oder insert current my $rv_uc = $dbh->do("UPDATE current SET TIMESTAMP=\"$timestamp\", DEVICE=\"$device\", READING=\"$reading\", TYPE=\"$type\", EVENT=\"$event\", VALUE=\"$value\", UNIT=\"$unit\" WHERE DEVICE=\"$device\" AND READING=\"$reading\""); if ($rv_uc == 0) { $sth_ic->execute($timestamp,$device,$type,$event,$reading,$value,$unit); @@ -12935,10 +12938,14 @@ sub DbRep_OutputWriteToDB { if ($@) { $err = $@; + Log3 ($name, 2, "DbRep $name - ERROR - $@"); + $dbh->rollback; $dbh->disconnect; + $err = 
encode_base64($err,""); + return ($err,$wrt,0); } } @@ -12951,8 +12958,7 @@ sub DbRep_OutputWriteToDB { Log3 ($hash->{NAME}, 3, "DbRep $name - number of lines inserted into \"$dblogname\": $ihs"); $irowdone = $ihs + $uhs; - # SQL-Laufzeit ermitteln - $wrt = tv_interval($wst); + $wrt = tv_interval($wst); # SQL-Laufzeit ermitteln } return ($err,$wrt,$irowdone); @@ -14223,172 +14229,174 @@ return;
-
  • dumpMySQL [clientSide | serverSide] - - creates a dump of the connected MySQL database.
    - Depending from selected option the dump will be created on Client- or on Server-Side.
    - The variants differs each other concerning the executing system, the creating location, the usage of - attributes, the function result and the needed hardware ressources.
    - The option "clientSide" e.g. needs more powerful FHEM-Server hardware, but saves all available - tables inclusive possibly created views.
    - With attribute "dumpCompress" a compression of dump file after creation can be switched on. -

    +
  • dumpMySQL [clientSide | serverSide]

    + + Creates a dump of the connected MySQL database.
    + Depending on the selected option the dump will be created on Client- or on Server-Side. <br>
    + The variants differ from each other concerning the executing system, the creating location, the usage of + attributes, the function result and the needed hardware resources. <br>
    + The option "clientSide" e.g. needs more powerful FHEM-Server hardware, but saves all available + tables including any views that may have been created. <br>
    + With attribute "dumpCompress" a compression of dump file after creation can be switched on. +

    - +
  • +
  • dumpSQLite - creates a dump of the connected SQLite database.
    This function uses the SQLite Online Backup API and allow to create a consistent backup of the @@ -15923,29 +15931,31 @@ sub bdump { new operation starts

  • -
  • seqDoubletsVariance <positive variance [negative variance] [EDGE=negative|positive]>
    - Accepted variance for the command "set <name> delSeqDoublets".
    - The value of this attribute describes the variance up to consecutive numeric values (VALUE) of - datasets are handled as identical. If only one numeric value is declared, it is used as - postive as well as negative variance and both form the "deletion corridor". - Optional a second numeric value for a negative variance, separated by blank,can be - declared. - Always absolute, i.e. positive numeric values, have to be declared.
    - If the supplement "EDGE=negative" is declared, values at a negative edge (e.g. when - value is changed from 4.0 -> 1.0) are not deleted although they are in the "deletion corridor". - Equivalent is valid with "EDGE=positive" for the positive edge (e.g. the change - from 1.2 -> 2.8). -

    +
  • seqDoubletsVariance <positive variance [negative variance] [EDGE=negative|positive]>

    + + Accepted variance for the command "set <name> delSeqDoublets".
    + The value of this attribute describes the variance up to which consecutive numeric values (VALUE) of + datasets are handled as identical. If only one numeric value is declared, it is used as + positive as well as negative variance and both form the "deletion corridor". + Optionally a second numeric value for a negative variance, separated by blank, can be + declared. + Always absolute, i.e. positive numeric values, have to be declared. <br>
    + If the supplement "EDGE=negative" is declared, values at a negative edge (e.g. when + value is changed from 4.0 -> 1.0) are not deleted although they are in the "deletion corridor". + Equivalent is valid with "EDGE=positive" for the positive edge (e.g. the change + from 1.2 -> 2.8). +

    - -

    -
  • + +
    +
    +
  • showproctime - if set, the reading "sql_processing_time" shows the required execution time (in seconds) @@ -17088,172 +17098,174 @@ return;

  • -
  • dumpMySQL [clientSide | serverSide] - - erstellt einen Dump der angeschlossenen MySQL-Datenbank.
    - Abhängig von der ausgewählten Option wird der Dump auf der Client- bzw. Serverseite erstellt.
    - Die Varianten unterscheiden sich hinsichtlich des ausführenden Systems, des Erstellungsortes, der - Attributverwendung, des erzielten Ergebnisses und der benötigten Hardwareressourcen.
    - Die Option "clientSide" benötigt z.B. eine leistungsfähigere Hardware des FHEM-Servers, sichert aber alle - Tabellen inklusive eventuell angelegter Views.
    - Mit dem Attribut "dumpCompress" kann eine Komprimierung der erstellten Dumpfiles eingeschaltet werden. -

    +
  • dumpMySQL [clientSide | serverSide]

    + + Erstellt einen Dump der angeschlossenen MySQL-Datenbank.
    + Abhängig von der ausgewählten Option wird der Dump auf der Client- bzw. Serverseite erstellt.
    + Die Varianten unterscheiden sich hinsichtlich des ausführenden Systems, des Erstellungsortes, der + Attributverwendung, des erzielten Ergebnisses und der benötigten Hardwareressourcen.
    + Die Option "clientSide" benötigt z.B. eine leistungsfähigere Hardware des FHEM-Servers, sichert aber alle + Tabellen inklusive eventuell angelegter Views.
    + Mit dem Attribut "dumpCompress" kann eine Komprimierung der erstellten Dumpfiles eingeschaltet werden. +

    - +
  • +
  • dumpSQLite - erstellt einen Dump der angeschlossenen SQLite-Datenbank.
    Diese Funktion nutzt die SQLite Online Backup API und ermöglicht es konsistente Backups der SQLite-DB @@ -18535,6 +18547,7 @@ sub dbval {
  • dumpDirLocal

    +