diff --git a/fhem/CHANGED b/fhem/CHANGED index a0c3c28bf..31a813810 100644 --- a/fhem/CHANGED +++ b/fhem/CHANGED @@ -1,5 +1,11 @@ # Add changes at the top of the list. Keep it in ASCII, and 80-char wide. # Do not insert empty lines here, update check depends on it. + - feature: 93_DbRep: V8.4.0, reduceLog from DbLog integrated into DbRep, + sqlCmd/dbValue with textField-long as default, both + attributes timeOlderThan / timeDiffToNow can be set, + get versionNotes changed to support en/de, new attribute + "countEntriesDetail", countEntries separate count of + every reading if attribute "countEntriesDetail" is set - change: 49_SSCam: COMPATIBILITY changed to 8.2.1 - bugfix: 74_XiaomiBTLESens : fix warning Illegal hexadecimal digit ignored diff --git a/fhem/FHEM/93_DbRep.pm b/fhem/FHEM/93_DbRep.pm index 6e32da738..f60fd4922 100644 --- a/fhem/FHEM/93_DbRep.pm +++ b/fhem/FHEM/93_DbRep.pm @@ -27,6 +27,8 @@ # Credits: # - viegener for some input # - some proposals to boost and improve SQL-Statements by JoeALLb +# - function reduceLog created by Claudiu Schuster (rapster) was copied from DbLog (Version 3.12.3 08.10.2018) +# and changed to meet the requirements of DbRep # ########################################################################################################################### # @@ -39,12 +41,28 @@ package main; use strict; use warnings; +use POSIX qw(strftime); +use Time::HiRes qw(gettimeofday tv_interval); +use Scalar::Util qw(looks_like_number); +eval "use DBI;1" or my $DbRepMMDBI = "DBI"; +use DBI::Const::GetInfoType; +use Blocking; +use Color; # colorpicker Widget +use Time::Local; +use Encode qw(encode_utf8); +use IO::Compress::Gzip qw(gzip $GzipError); +use IO::Uncompress::Gunzip qw(gunzip $GunzipError); +# no if $] >= 5.018000, warnings => 'experimental'; +no if $] >= 5.017011, warnings => 'experimental::smartmatch'; # Versions History intern our %DbRep_vNotesIntern = ( - "8.2.3" => "07.10.2018 check availability of DbLog-device at definition time 
of DbRep-device ", - "8.2.2" => "07.10.2018 DbRep_getMinTs changed, fix don't get the real min timestamp in rare cases ", - "8.2.1" => "07.10.2018 \$hash->{dbloghash}{HELPER}{REOPEN_RUNS_UNTIL} contains the time until DB is closed ", + "8.4.0" => "22.10.2018 countEntries separately for every reading if attribute \"countEntriesDetail\" is set, ". + "versionNotes changed to support en/de, get dbValue as textfield-long ", + "8.3.0" => "17.10.2018 reduceLog from DbLog integrated into DbRep, textField-long as default for sqlCmd, both attributes timeOlderThan and timeDiffToNow can be set at same time", + "8.2.3" => "07.10.2018 check availability of DbLog-device at definition time of DbRep-device ", + "8.2.2" => "07.10.2018 DbRep_getMinTs changed, fix don't get the real min timestamp in rare cases ", + "8.2.1" => "07.10.2018 \$hash->{dbloghash}{HELPER}{REOPEN_RUNS_UNTIL} contains time until DB is closed ", "8.2.0" => "05.10.2018 direct help for attributes ", "8.1.0" => "02.10.2018 new get versionNotes command ", "8.0.1" => "20.09.2018 DbRep_getMinTs improved", @@ -96,174 +114,17 @@ our %DbRep_vNotesIntern = ( "7.3.1" => "08.01.2018 fix syntax error for perl < 5.20 ", "7.3.0" => "07.01.2018 DbRep-charfilter avoid control characters in datasets to export, impfile_Push errortext improved, expfile_DoParse changed to use aggregation for split selects in timeslices (avoid heavy memory consumption) ", "7.2.1" => "04.01.2018 bugfix month out of range that causes fhem crash ", - "7.2.0" => "27.12.2017 new attribute 'seqDoubletsVariance' ", - "7.1.0" => "22.12.2017 new attribute timeYearPeriod for reports correspondig to e.g. 
electricity billing, bugfix connection check is running after restart allthough dev is disabled ", - "7.0.0" => "18.12.2017 don't set \$runtime_string_first,\$runtime_string_next,\$ts if time/aggregation-attributes not set, change_Push redesigned, new command get blockinginfo, identify if reopen is running on dblog-device and postpone the set-command ", - "6.4.3" => "17.12.2017 bugfix in delSeqDoublets, fetchrows if datasets contain characters like \"' and s.o. ", - "6.4.2" => "15.12.2017 change 'delSeqDoublets' to respect attribute 'limit' (adviceDelete,adviceRemain), commandref revised ", - "6.4.1" => "13.12.2017 new Attribute 'sqlResultFieldSep' for field separate options of sqlCmd result ", - "6.4.0" => "10.12.2017 prepare module for usage of datetime picker widget (Forum:#35736) ", - "6.3.2" => "05.12.2017 make direction of fetchrows switchable ASC <-> DESC by attribute fetchRoute ", - "6.3.1" => "04.12.2017 fix DBD::mysql::st execute failed: Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'DEVELfhem.history.TIMESTAMP' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by Forum:https://forum.fhem.de/index.php/topic,65860.msg725595.html#msg725595 , fix currentfillup_Push PostgreSQL -> use \$runtime_string_next as Timestring during current insert ", - "6.3.0" => "04.12.2017 support addition format d:xx h:xx m:xx s:xx for attributes timeDiffToNow, timeOlderThan ", - "6.2.3" => "04.12.2017 fix localtime(time); (current time deduction) in DbRep_createTimeArray ", - "6.2.2" => "01.12.2017 support all aggregations for delSeqDoublets, better output filesize when mysql dump finished ", - "6.2.1" => "30.11.2017 support delSeqDoublets without device,reading is set and support device-devspec, reading list, minor fixes in delSeqDoublets ", - "6.2.0" => "29.11.2017 enhanced command delSeqDoublets by 'delete' ", - "6.1.0" => "29.11.2017 new command delSeqDoublets 
(adviceRemain,adviceDelete), add Option to LASTCMD ", - "6.0.0" => "18.11.2017 FTP transfer dumpfile after dump, delete old dumpfiles within Blockingcall (avoid freezes) commandref revised, minor fixes ", - "5.8.6" => "30.10.2017 don't limit attr reading, device if the attr contains a list ", - "5.8.5" => "19.10.2017 filter unwanted characters in 'procinfo'-result ", - "5.8.4" => "17.10.2017 DbRep_createSelectSql, DbRep_createDeleteSql, currentfillup_Push switch to devspec ", - "5.8.3" => "16.10.2017 change to use DbRep_createSelectSql: minValue,diffValue - DbRep_createDeleteSql: delEntries ", - "5.8.2" => "15.10.2017 sub DbRep_createTimeArray ", - "5.8.1" => "15.10.2017 change to use DbRep_createSelectSql: sumValue,averageValue,exportToFile,maxValue ", - "5.8.0" => "15.10.2017 adapt DbRep_createSelectSql for better performance if time/aggregation not set, can set table as flexible argument for countEntries, fetchrows (default: history), minor fixes ", - "5.7.1" => "13.10.2017 tableCurrentFillup fix for PostgreSQL, commandref revised ", - "5.7.0" => "09.10.2017 tableCurrentPurge, tableCurrentFillup ", - "5.6.4" => "05.10.2017 abortFn's adapted to use abortArg (Forum:77472) ", - "5.6.3" => "01.10.2017 fix crash of fhem due to wrong rmday-calculation if month is changed, Forum:#77328 ", - "5.6.2" => "28.08.2017 commandref revised ", - "5.6.1" => "18.07.2017 commandref revised, minor fixes ", - "5.6.0" => "17.07.2017 default timeout changed to 86400, new get-command 'procinfo' (MySQL) ", - "5.5.2" => "16.07.2017 dbmeta_DoParse -> show variables (no global) ", - "5.5.1" => "16.07.2017 wrong text output in state when restoreMySQL was aborted by timeout ", - "5.5.0" => "10.07.2017 replace \$hash->{dbloghash}{DBMODEL} by \$hash->{dbloghash}{MODEL} (DbLog was changed) ", - "5.4.0" => "03.07.2017 restoreMySQL - restore of csv-files (from dumpServerSide), RestoreRowsHistory/ DumpRowsHistory, Commandref revised ", - "5.3.1" => "28.06.2017 vacuum for SQLite added, readings 
enhanced for optimizeTables / vacuum, commandref revised ", - "5.3.0" => "26.06.2017 change of DbRep_mysqlOptimizeTables, new command optimizeTables ", - "5.2.1" => "25.06.2017 bugfix in sqlCmd_DoParse (PRAGMA, UTF8, SHOW) ", - "5.2.0" => "14.06.2017 UTF-8 support for MySQL (fetchrows, srvinfo, expfile, impfile, insert) ", - "5.1.0" => "13.06.2017 column 'UNIT' added to fetchrow result ", - "5.0.6" => "13.06.2017 add Aria engine to DbRep_mysqlOptimizeTables ", - "5.0.5" => "12.06.2017 bugfixes in DbRep_DumpAborted, some changes in dumpMySQL, optimizeTablesBeforeDump added to mysql_DoDumpServerSide, new reading DumpFileCreatedSize ", - "5.0.4" => "09.06.2017 some improvements and changes of mysql_DoDump, commandref revised, new attributes executeBeforeDump, executeAfterDump ", - "5.0.3" => "07.06.2017 mysql_DoDumpServerSide added ", - "5.0.2" => "06.06.2017 little improvements in mysql_DoDumpClientSide ", - "5.0.1" => "05.06.2017 dependencies between dumpMemlimit and dumpSpeed created, enhanced verbose 5 logging ", - "5.0.0" => "04.06.2017 MySQL Dump nonblocking added ", - "4.16.1" => "22.05.2017 encode json without JSON module, requires at least fhem.pl 14348 2017-05-22 20:25:06Z ", - "4.16.0" => "22.05.2017 format json as option of sqlResultFormat, state will never be deleted in 'DbRep_delread' ", - "4.15.1" => "20.05.2017 correction of commandref ", - "4.15.0" => "17.05.2017 SUM(VALUE),AVG(VALUE) recreated for PostgreSQL, Code reviewed and optimized ", - "4.14.2" => "16.05.2017 SQL-Statements optimized for Wildcard '%' usage if used, Wildcard '_' isn't supported furthermore, \"averageValue\", \"sumValue\", \"maxValue\", \"minValue\", \"countEntries\" performance optimized, commandref revised ", - "4.14.1" => "16.05.2017 limitation of fetchrows result datasets to 1000 by attr limit ", - "4.14.0" => "15.05.2017 UserExitFn added as separate sub (DbRep_userexit) and attr userExitFn defined, new subs ReadingsBulkUpdateTimeState, ReadingsBulkUpdateValue, 
ReadingsSingleUpdateValue, commandref revised ", - "4.13.7" => "11.05.2017 attribute sqlResultSingleFormat became sqlResultFormat, sqlResultSingle deleted and sqlCmd contains now all format possibilities (separated,mline,sline,table), commandref revised ", - "4.13.6" => "10.05.2017 minor changes ", - "4.13.5" => "09.05.2017 cover dbh prepare in eval to avoid crash (sqlResult_DoParse) ", - "4.13.4" => "09.05.2017 attribute sqlResultSingleFormat: mline sline table, attribute 'allowDeletion' is now also valid for sqlResult, sqlResultSingle and delete command is forced ", - "4.13.3" => "09.05.2017 flexible format of reading SqlResultRow_xxx for proper and sort sequence ", - "4.13.2" => "09.05.2017 sqlResult, sqlResultSingle are able to execute delete, insert, update commands error corrections ", - "4.13.1" => "09.05.2017 change substitution in sqlResult, sqlResult_DoParse ", - "4.13.0" => "09.05.2017 acceptance of viegener change with some corrections (separating lines with ]|[ in Singleline) ", - "4.12.3" => "07.05.2017 New sets sqlSelect execute arbitrary sql command returning each row as single reading (fields separated with |) allowing replacement of timestamp values according to attribute definition --> §timestamp_begin§ etc and sqlSelectSingle for executing an sql command returning a single reading (separating lines with §) ", - "4.12.2" => "17.04.2017 DbRep_checkUsePK changed ", - "4.12.1" => "07.04.2017 get tableinfo changed for MySQL ", - "4.12.0" => "31.03.2017 support of primary key for insert functions ", - "4.11.4" => "29.03.2017 bugfix timestamp in minValue, maxValue if VALUE contains more than one numeric value (like in sysmon) ", - "4.11.3" => "26.03.2017 usage of daylight saving time changed to avoid wrong selection when wintertime switch to summertime, minor bug fixes ", - "4.11.2" => "16.03.2017 bugfix in func dbmeta_DoParse (SQLITE_DB_FILENAME) ", - "4.11.1" => "28.02.2017 commandref completed ", - "4.11.0" => "18.02.2017 added 
[current|previous]_[month|week|day|hour]_begin and [current|previous]_[month|week|day|hour]_end as options of timestamp ", - "4.10.3" => "01.02.2017 rename reading 'diff-overrun_limit-' to 'diff_overrun_limit_', DbRep_collaggstr day aggregation changed back from 4.7.5 change ", - "4.10.2" => "16.01.2017 bugfix uninitialized value \$renmode if RenameAgent ", - "4.10.1" => "30.11.2016 bugfix importFromFile format problem if UNIT-field wasn't set ", - "4.10.0" => "28.12.2016 del_DoParse changed to use Wildcards, del_ParseDone changed to use readingNameMap ", - "4.9.0" => "23.12.2016 function readingRename added ", - "4.8.6" => "17.12.2016 new bugfix group by-clause due to incompatible changes made in MyQL 5.7.5 (Forum #msg541103) ", - "4.8.5" => "16.12.2016 bugfix group by-clause due to Forum #msg540610 ", - "4.8.4" => "13.12.2016 added 'group by ...,table_schema' to select in dbmeta_DoParse due to Forum #msg539228, commandref adapted, changed 'not_enough_data_in_period' to 'less_data_in_period' ", - "4.8.3" => "12.12.2016 balance diff to next period if value of period is 0 between two periods with values ", - "4.8.2" => "10.12.2016 bugfix negativ diff if balanced ", - "4.8.1" => "10.12.2016 added balance diff to diffValue, a difference between the last value of an old aggregation period to the first value of a new aggregation period will be take over now ", - "4.8.0" => "09.12.2016 diffValue selection chenged to 'between' ", - "4.7.7" => "08.12.2016 code review ", - "4.7.6" => "07.12.2016 DbRep version as internal, check if perl module DBI is installed ", - "4.7.5" => "05.12.2016 DbRep_collaggstr day aggregation changed ", - "4.7.4" => "28.11.2016 sub DbRep_calcount changed due to Forum #msg529312 ", - "4.7.3" => "20.11.2016 new diffValue function made suitable to SQLite ", - "4.7.2" => "20.11.2016 commandref adapted, state = Warnings adapted ", - "4.7.1" => "17.11.2016 changed fieldlength to DbLog new standard, diffValue state Warnings due to several situations and 
generate readings not_enough_data_in_period, diff-overrun_limit ", - "4.7.0" => "16.11.2016 sub diffValue changed due to Forum #msg520154, attr diffAccept added, diffValue now able to calculate if counter was going to 0 ", - "4.6.0" => "31.10.2016 bugfix calc issue due to daylight saving time end (winter time) ", - "4.5.1" => "18.10.2016 get svrinfo contains SQLite database file size (MB), modified timeout routine ", - "4.5.0" => "17.10.2016 get data of dbstatus, dbvars, tableinfo, svrinfo (database dependend) ", - "4.4.0" => "13.10.2016 get function prepared ", - "4.3.0" => "11.10.2016 Preparation of get metadata ", - "4.2.0" => "10.10.2016 allow SQL-Wildcards (% _) in attr reading & attr device ", - "4.1.3" => "09.10.2016 bugfix delEntries running on SQLite ", - "4.1.2" => "08.10.2016 old device in DEF of connected DbLog device will substitute by renamed device if it is present in DEF ", - "4.1.1" => "06.10.2016 NotifyFn is getting events from global AND own device, set is reduced if ROLE=Agent, english commandref enhanced ", - "4.1.0" => "05.10.2016 DbRep_Attr changed ", - "4.0.0" => "04.10.2016 Internal/Attribute ROLE added, sub DbRep_firstconnect changed NotifyFN activated to start deviceRename if ROLE=Agent ", - "3.13.0" => "03.10.2016 added deviceRename to rename devices in database, new Internal DATABASE ", - "3.12.0" => "02.10.2016 function minValue added ", - "3.11.1" => "30.09.2016 bugfix include first and next day in calculation if Timestamp is exactly 'YYYY-MM-DD 00:00:00' ", - "3.11.0" => "29.09.2016 maxValue calculation moved to background to reduce FHEM-load ", - "3.10.1" => "28.09.2016 sub impFile -> changed \$dbh->{AutoCommit} = 0 to \$dbh->begin_work ", - "3.10.0" => "27.09.2016 diffValue calculation moved to background to reduce FHEM-load, new reading background_processing_time ", - "3.9.1" => "27.09.2016 Internal 'LASTCMD' added ", - "3.9.0" => "26.09.2016 new function importFromFile to import data from file (CSV format) ", - "3.8.0" => 
"16.09.2016 new attr readingPreventFromDel to prevent readings from deletion when a new operation starts ", - "3.7.3" => "11.09.2016 changed format of diffValue-reading if no value was selected ", - "3.7.2" => "04.09.2016 problem in diffValue fixed if if no value was selected ", - "3.7.1" => "31.08.2016 Reading 'errortext' added, commandref continued, exportToFile changed, diffValue changed to fix wrong timestamp if error occur ", - "3.7.0" => "30.08.2016 exportToFile added exports data to file (CSV format) ", - "3.6.0" => "29.08.2016 plausibility checks of database column character length ", - "3.5.2" => "21.08.2016 fit to new commandref style ", - "3.5.1" => "20.08.2016 commandref continued ", - "3.5.0" => "18.08.2016 new attribute timeOlderThan ", - "3.4.4" => "12.08.2016 current_year_begin, previous_year_begin, current_year_end, previous_year_end added as possible values for timestmp attribute ", - "3.4.3" => "09.08.2016 fields for input using 'insert' changed to 'date,time,value,unit'. 
Attributes device, reading will be used to complete dataset, now more informations available about faulty datasets in arithmetic operations ", - "3.4.2" => "05.08.2016 commandref complemented, fieldlength used in function 'insert' trimmed to 32 ", - "3.4.1" => "04.08.2016 check of numeric value type in functions maxvalue, diffvalue ", - "3.4.0" => "03.08.2016 function 'insert' added ", - "3.3.3" => "16.07.2016 bugfix of aggregation=week if month start is 01 and month end is 12 AND the last week of december is '01' like in 2014 (checked in version 11804) ", - "3.3.2" => "16.07.2016 readings completed with begin of selection range to ensure valid reading order, also done if readingNameMap is set ", - "3.3.1" => "15.07.2016 function 'diffValue' changed, write '-' if no value ", - "3.3.0" => "12.07.2016 function 'diffValue' added ", - "3.2.1" => "12.07.2016 DbRep_Notify prepared, switched from readingsSingleUpdate to readingsBulkUpdate ", - "3.2.0" => "11.07.2016 handling of db-errors is relocated to blockingcall-subs (checked in version 11785) ", - "3.1.1" => "10.07.2016 state turns to initialized and connected after attr 'disabled' is switched from '1' to '0' ", - "3.1.0" => "09.07.2016 new Attr 'timeDiffToNow' and change subs according to that ", - "3.0.0" => "04.07.2016 no selection if timestamp isn't set and aggregation isn't set with fetchrows, delEntries ", - "2.9.9" => "03.07.2016 english version of commandref completed ", - "2.9.8" => "01.07.2016 changed fetchrows_ParseDone to handle readingvalues with whitespaces correctly ", - "2.9.7" => "30.06.2016 moved {DBLOGDEVICE} to {HELPER}{DBLOGDEVICE} ", - "2.9.6" => "30.06.2016 sql-call changed for countEntries, averageValue, sumValue avoiding problems if no timestamp is set and aggregation is set ", - "2.9.5" => "30.06.2016 format of readingnames changed again (substitute ':' with '-' in time) ", - "2.9.4" => "30.06.2016 change readingmap to readingNameMap, prove of unsupported characters added ", - "2.9.3" => 
"27.06.2016 format of readingnames changed avoiding some problems after restart and splitting ", - "2.9.2" => "27.06.2016 use Time::Local added, DbRep_firstconnect added ", - "2.9.1" => "26.06.2016 german commandref added ", - "2.9.0" => "25.06.2016 attributes showproctime, timeout added ", - "2.8.1" => "24.06.2016 sql-creation of sumValue, maxValue, fetchrows changed main-routine changed ", - "2.8.0" => "24.06.2016 function averageValue changed to nonblocking function ", - "2.7.1" => "24.06.2016 changed blockingcall routines, changed to unique abort-function ", - "2.7.0" => "23.06.2016 changed function countEntries to nonblocking ", - "2.6.3" => "22.06.2016 abort-routines changed, dbconnect-routines changed ", - "2.6.2" => "21.06.2016 aggregation week corrected ", - "2.6.1" => "20.06.2016 routine maxval_ParseDone corrected ", - "2.6.0" => "31.05.2016 maxValue changed to nonblocking function ", - "2.5.3" => "31.05.2016 function delEntries changed ", - "2.5.2" => "31.05.2016 ping check changed, DbRep_Connect changed ", - "2.5.1" => "30.05.2016 sleep in nb-functions deleted ", - "2.5.0" => "30.05.2016 changed to use own \$dbh with DbLog-credentials, function sumValue, fetchrows ", - "2.4.2" => "29.05.2016 function sumValue changed ", - "2.4.1" => "29.05.2016 function fetchrow changed ", - "2.4.0" => "29.05.2016 changed to nonblocking function for sumValue ", - "2.3.0" => "28.05.2016 changed sumValue to 'prepare' with placeholders ", - "2.2.0" => "27.05.2016 changed fetchrow and delEntries function to 'prepare' with placeholders added nonblocking function for delEntries ", - "2.1.0" => "25.05.2016 codechange ", - "2.0.0" => "24.05.2016 added nonblocking function for fetchrow ", - "1.2.0" => "21.05.2016 function and attribute for delEntries added ", - "1.1.0" => "20.05.2016 change result-format of 'count', move runtime-counter to sub DbRep_collaggstr ", "1.0.0" => "19.05.2016 Initial" ); # Versions History extern: our %DbRep_vNotesExtern = ( + "8.4.0" => "22.10.2018 
New attribute \"countEntriesDetail\". Function countEntries creates number of datasets for every ". + "reading separately if attribute \"countEntriesDetail\" is set. Get versionNotes changed to support en/de. ". + "Function \"get dbValue\" opens an editor window ", + "8.3.0" => "17.10.2018 reduceLog from DbLog integrated into DbRep, textField-long as default for sqlCmd, both attributes ". + "timeOlderThan and timeDiffToNow can be set at same time -> the selection time between timeOlderThan ". + "and timeDiffToNow can be calculated dynamically ", "8.2.2" => "07.10.2018 fix don't get the real min timestamp in rare cases ", "8.2.0" => "05.10.2018 direct help for attributes ", "8.1.0" => "01.10.2018 new get versionNotes command ", @@ -330,7 +191,7 @@ our %DbRep_vNotesExtern = ( "4.7.3" => "20.11.2016 new diffValue function made suitable to SQLite ", "4.6.0" => "31.10.2016 bugfix calc issue due to daylight saving time end (winter time) ", "4.5.1" => "18.10.2016 get svrinfo contains SQLite database file size (MB), modified timeout routine ", - "4.2.0" => "10.10.2016 allow SQL-Wildcards (% _) in attr reading & attr device ", + "4.2.0" => "10.10.2016 allow SQL-Wildcards in attr reading & attr device ", "4.1.3" => "09.10.2016 bugfix delEntries running on SQLite ", "3.13.0" => "03.10.2016 added deviceRename to rename devices in database, new Internal DATABASE ", "3.12.0" => "02.10.2016 function minValue added ", @@ -364,31 +225,21 @@ our %DbRep_vNotesExtern = ( "1.0.0" => "19.05.2016 Initial" ); -# Hint Hash -our %DbRep_vHintsExt = ( +# Hint Hash en +our %DbRep_vHintsExt_en = ( "2" => "Rules of german weather service for calculation of average temperatures. ", - "1" => "Some helpful FHEM-Wiki Entries" + "1" => "Some helpful FHEM-Wiki Entries." 
); -use POSIX qw(strftime); -use Time::HiRes qw(gettimeofday tv_interval); -use Scalar::Util qw(looks_like_number); -eval "use DBI;1" or my $DbRepMMDBI = "DBI"; -use DBI::Const::GetInfoType; -use Blocking; -use Color; # colorpicker Widget -use Time::Local; -use Encode qw(encode_utf8); -use IO::Compress::Gzip qw(gzip $GzipError); -use IO::Uncompress::Gunzip qw(gunzip $GunzipError); -# no if $] >= 5.018000, warnings => 'experimental'; -no if $] >= 5.017011, warnings => 'experimental::smartmatch'; +# Hint Hash de +our %DbRep_vHintsExt_de = ( + "2" => "Regularien des deutschen Wetterdienstes zur Berechnung von Durchschnittstemperaturen. ", + "1" => "Hilfreiche Hinweise zu DbRep im FHEM-Wiki." +); sub DbRep_Main($$;$); sub DbLog_cutCol($$$$$$$); # DbLog-Funktion nutzen um Daten auf maximale Länge beschneiden -my $DbRepVersion = "8.0.1"; - my %dbrep_col = ("DEVICE" => 64, "TYPE" => 64, "EVENT" => 512, @@ -415,6 +266,7 @@ sub DbRep_Initialize($) { "reading ". "allowDeletion:1,0 ". "averageCalcForm:avgArithmeticMean,avgDailyMeanGWS,avgTimeWeightMean ". + "countEntriesDetail:1,0 ". "device " . "dumpComment ". "dumpCompress:1,0 ". @@ -575,7 +427,8 @@ sub DbRep_Set($@) { (($hash->{ROLE} ne "Agent")?"fetchrows:history,current ":""). (($hash->{ROLE} ne "Agent")?"diffValue:display,writeToDB ":""). (($hash->{ROLE} ne "Agent")?"insert ":""). - (($hash->{ROLE} ne "Agent")?"sqlCmd ":""). + (($hash->{ROLE} ne "Agent")?"reduceLog ":""). + (($hash->{ROLE} ne "Agent")?"sqlCmd:textField-long ":""). (($hash->{ROLE} ne "Agent" && $hl)?"sqlCmdHistory:".$hl." ":""). (($hash->{ROLE} ne "Agent")?"sqlSpecial:50mostFreqLogsLast2days,allDevCount,allDevReadCount ":""). (($hash->{ROLE} ne "Agent")?"syncStandby ":""). 
@@ -679,7 +532,26 @@ sub DbRep_Set($@) { DbRep_beforeproc($hash, "delSeq"); DbRep_Main($hash,$opt,$prop); return undef; - } + } + + if ($opt =~ m/reduceLog/ && $hash->{ROLE} ne "Agent") { + if ($hash->{HELPER}{RUNNING_REDUCELOG} && $hash->{HELPER}{RUNNING_REDUCELOG}{pid} !~ m/DEAD/) { + return "reduceLog already in progress. Please wait for the current process to finish."; + } else { + delete $hash->{HELPER}{RUNNING_REDUCELOG}; + my @b = @a; + shift(@b); + $hash->{LASTCMD} = join(" ",@b); + $hash->{HELPER}{REDUCELOG} = \@a; + Log3 ($name, 3, "DbRep $name - ################################################################"); + Log3 ($name, 3, "DbRep $name - ### new reduceLog run ###"); + Log3 ($name, 3, "DbRep $name - ################################################################"); + # Befehl vor Procedure ausführen + DbRep_beforeproc($hash, "reduceLog"); + DbRep_Main($hash,$opt); + return undef; + } + } if ($hash->{HELPER}{RUNNING_BACKUP_CLIENT}) { $setlist = "Unknown argument $opt, choose one of ". @@ -949,12 +821,12 @@ sub DbRep_Get($@) { "svrinfo:noArg ". "blockinginfo:noArg ". "minTimestamp:noArg ". - "dbValue ". + "dbValue:textField-long ". (($dbmodel eq "MYSQL")?"dbstatus:noArg ":""). (($dbmodel eq "MYSQL")?"tableinfo:noArg ":""). (($dbmodel eq "MYSQL")?"procinfo:noArg ":""). (($dbmodel eq "MYSQL")?"dbvars:noArg ":""). - "versionNotes:noArg " + "versionNotes " ; return if(IsDisabled($name)); @@ -1010,59 +882,82 @@ sub DbRep_Get($@) { return $err?$err:$ret; } elsif ($opt =~ /versionNotes/) { - my $header = "Module release information table
"; + my $header = "Module release information
"; my $header1 = "Helpful hints
"; - + my %hs; + # Ausgabetabelle erstellen my ($ret,$val0,$val1); - $ret = ""; - $ret .= sprintf("
$header
"); - $ret .= ""; - $ret .= ""; - $ret .= ""; - my $i = 0; - foreach my $key (reverse sort(keys %DbRep_vNotesExtern)) { - ($val0,$val1) = split(/\s/,$DbRep_vNotesExtern{$key},2); - $ret .= sprintf("" ); - $ret .= ""; - $i++; - if ($i & 1) { - # $i ist ungerade - $ret .= ""; + my $i = 0; + + $ret = ""; + + # Hints + if(!$prop || $prop =~ /hints/ || $prop =~ /[\d]+/) { + $ret .= sprintf("
$header1
"); + $ret .= "
$key $val0 $val1
"; + $ret .= ""; + $ret .= ""; + if($prop && $prop =~ /[\d]+/) { + if(AttrVal("global","language","EN") eq "DE") { + %hs = ( $prop => $DbRep_vHintsExt_de{$prop} ); + } else { + %hs = ( $prop => $DbRep_vHintsExt_en{$prop} ); + } } else { - $ret .= ""; + if(AttrVal("global","language","EN") eq "DE") { + %hs = %DbRep_vHintsExt_de; + } else { + %hs = %DbRep_vHintsExt_en; + } + } + $i = 0; + foreach my $key (reverse sort(keys %hs)) { + $val0 = $hs{$key}; + $ret .= sprintf("" ); + $ret .= ""; + $i++; + if ($i & 1) { + # $i ist ungerade + $ret .= ""; + } else { + $ret .= ""; + } } - } - $ret .= ""; - $ret .= ""; - $ret .= "
$key $val0
"; - $ret .= "
"; - - $ret .= sprintf("
$header1
"); - $ret .= ""; - $ret .= ""; - $ret .= ""; - $i = 0; - foreach my $key (reverse sort(keys %DbRep_vHintsExt)) { - $val0 = $DbRep_vHintsExt{$key}; - $ret .= sprintf("" ); - $ret .= ""; - $i++; - if ($i & 1) { - # $i ist ungerade - $ret .= ""; - } else { - $ret .= ""; + $ret .= ""; + $ret .= ""; + $ret .= "
$key $val0
"; + $ret .= "
"; + } + + # Notes + if(!$prop || $prop =~ /rel/) { + $ret .= sprintf("
$header
"); + $ret .= ""; + $ret .= ""; + $ret .= ""; + $i = 0; + foreach my $key (reverse sort(keys %DbRep_vNotesExtern)) { + ($val0,$val1) = split(/\s/,$DbRep_vNotesExtern{$key},2); + $ret .= sprintf("" ); + $ret .= ""; + $i++; + if ($i & 1) { + # $i ist ungerade + $ret .= ""; + } else { + $ret .= ""; + } } - } - - $ret .= ""; - $ret .= ""; - $ret .= "
$key $val0 $val1
"; - $ret .= "
"; - $ret .= ""; + $ret .= ""; + $ret .= ""; + $ret .= ""; + $ret .= ""; + } + + $ret .= ""; - return $ret; + return $ret; } else { return "$getlist"; @@ -1244,6 +1139,7 @@ sub DbRep_Attr($$$$) { if ($a =~ /^current$|^previous$/ && $b =~ /^hour$|^day$|^week$|^month$|^year$/ && $c =~ /^begin$|^end$/) { delete($attr{$name}{timeDiffToNow}) if ($attr{$name}{timeDiffToNow}); delete($attr{$name}{timeOlderThan}) if ($attr{$name}{timeOlderThan}); + delete($attr{$name}{timeYearPeriod}) if ($attr{$name}{timeYearPeriod}); return undef; } $aVal = DbRep_formatpicker($aVal); @@ -1260,6 +1156,7 @@ sub DbRep_Attr($$$$) { } delete($attr{$name}{timeDiffToNow}) if ($attr{$name}{timeDiffToNow}); delete($attr{$name}{timeOlderThan}) if ($attr{$name}{timeOlderThan}); + delete($attr{$name}{timeYearPeriod}) if ($attr{$name}{timeYearPeriod}); } if ($aName =~ /ftpTimeout|timeout|diffAccept/) { unless ($aVal =~ /^[0-9]+$/) { return " The Value of $aName is not valid. Use only figures 0-9 without decimal places !";} @@ -1272,7 +1169,6 @@ sub DbRep_Attr($$$$) { { return "The Value of \"$aName\" isn't valid. Set simple seconds like \"86400\" or use form like \"y:1 d:10 h:6 m:12 s:20\". Refer to commandref !";} delete($attr{$name}{timestamp_begin}) if ($attr{$name}{timestamp_begin}); delete($attr{$name}{timestamp_end}) if ($attr{$name}{timestamp_end}); - delete($attr{$name}{timeOlderThan}) if ($attr{$name}{timeOlderThan}); delete($attr{$name}{timeYearPeriod}) if ($attr{$name}{timeYearPeriod}); } if ($aName eq "timeOlderThan") { @@ -1280,7 +1176,6 @@ sub DbRep_Attr($$$$) { { return "The Value of \"$aName\" isn't valid. Set simple seconds like \"86400\" or use form like \"y:1 d:10 h:6 m:12 s:20\". 
Refer to commandref !";} delete($attr{$name}{timestamp_begin}) if ($attr{$name}{timestamp_begin}); delete($attr{$name}{timestamp_end}) if ($attr{$name}{timestamp_end}); - delete($attr{$name}{timeDiffToNow}) if ($attr{$name}{timeDiffToNow}); delete($attr{$name}{timeYearPeriod}) if ($attr{$name}{timeYearPeriod}); } if ($aName eq "dumpMemlimit" || $aName eq "dumpSpeed") { @@ -1478,7 +1373,7 @@ sub DbRep_getMinTs($) { eval { $mints = $dbh->selectrow_array("SELECT min(TIMESTAMP) FROM history;"); }; # eval { $mints = $dbh->selectrow_array("select TIMESTAMP from history limit 1;"); }; - # eval { $mints = $dbh->selectrow_array("select TIMESTAMP from history order by TIMESTAMP limit 1;"); }; + # eval { $mints = $dbh->selectrow_array("select TIMESTAMP from history order by TIMESTAMP limit 1;"); }; $dbh->disconnect; @@ -1576,6 +1471,7 @@ sub DbRep_Main($$;$) { $hash->{HELPER}{RUNNING_BCKPREST_SERVER} || $hash->{HELPER}{RUNNING_RESTORE} || $hash->{HELPER}{RUNNING_REPAIR} || + $hash->{HELPER}{RUNNING_REDUCELOG} || $hash->{HELPER}{RUNNING_OPTIMIZE}) && $opt !~ /dumpMySQL|restoreMySQL|dumpSQLite|restoreSQLite|optimizeTables|vacuum|repairSQLite/ ); @@ -1669,7 +1565,7 @@ sub DbRep_Main($$;$) { Log3 ($name, 4, "DbRep $name - Timestamp end human readable: not set") if($opt !~ /tableCurrentPurge/); } - Log3 ($name, 4, "DbRep $name - Aggregation: $aggregation") if($opt !~ /tableCurrentPurge|tableCurrentFillup|fetchrows|insert/); + Log3 ($name, 4, "DbRep $name - Aggregation: $aggregation") if($opt !~ /tableCurrentPurge|tableCurrentFillup|fetchrows|insert|reduceLog/); ##### Funktionsaufrufe ##### if ($opt eq "sumValue") { @@ -1750,6 +1646,13 @@ sub DbRep_Main($$;$) { DbRep_beforeproc($hash, "syncStandby"); $hash->{HELPER}{RUNNING_PID} = BlockingCall("DbRep_syncStandby", "$name§$device§$reading§$runtime_string_first§$runtime_string_next§$ts§$prop", "DbRep_syncStandbyDone", $to, "DbRep_ParseAborted", $hash); } + + if ($opt =~ /reduceLog/) { + $hash->{HELPER}{RUNNING_REDUCELOG} = 
BlockingCall("DbRep_reduceLog", "$name|$runtime_string_first|$runtime_string_next", "DbRep_reduceLogDone", $to, "DbRep_reduceLogAborted", $hash); + ReadingsSingleUpdateValue ($hash, "state", "reduceLog database is running - be patient and see Logfile !", 1); + $hash->{HELPER}{RUNNING_REDUCELOG}{loglevel} = 5 if($hash->{HELPER}{RUNNING_REDUCELOG}); # Forum #77057 + return; + } $hash->{HELPER}{RUNNING_PID}{loglevel} = 5 if($hash->{HELPER}{RUNNING_PID}); # Forum #77057 return; @@ -2659,7 +2562,8 @@ sub count_DoParse($) { my $dbuser = $dbloghash->{dbuser}; my $dblogname = $dbloghash->{NAME}; my $dbpassword = $attr{"sec$dblogname"}{secret}; - my ($dbh,$sql,$sth,$err,$selspec); + my $ced = AttrVal($name,"countEntriesDetail",0); + my ($dbh,$sql,$sth,$err); # Background-Startzeit my $bst = [gettimeofday]; @@ -2681,22 +2585,38 @@ sub count_DoParse($) { # Timestampstring to Array my @ts = split("\\|", $ts); Log3 ($name, 5, "DbRep $name - Timestamp-Array: \n@ts"); - + # SQL-Startzeit my $st = [gettimeofday]; # DB-Abfrage zeilenweise für jeden Timearray-Eintrag - my $arrstr; + my ($arrstr,@rsf,$ttail); + my $addon = ''; + my $selspec = "COUNT(*)"; + if($ced) { + $addon = "group by READING"; + $selspec = "READING, COUNT(*)"; + } + foreach my $row (@ts) { my @a = split("#", $row); my $runtime_string = $a[0]; my $runtime_string_first = $a[1]; - my $runtime_string_next = $a[2]; + my $runtime_string_next = $a[2]; + my $tc = 0; + + if($aggregation eq "hour") { + @rsf = split(/[" "\|":"]/,$runtime_string_first); + $ttail = $rsf[0]."_".$rsf[1]."|"; + } else { + @rsf = split(" ",$runtime_string_first); + $ttail = $rsf[0]."|"; + } if ($IsTimeSet || $IsAggrSet) { - $sql = DbRep_createSelectSql($hash,$table,"COUNT(*)",$device,$reading,"'$runtime_string_first'","'$runtime_string_next'",''); + $sql = DbRep_createSelectSql($hash,$table,$selspec,$device,$reading,"'$runtime_string_first'","'$runtime_string_next'",$addon); } else { - $sql = 
DbRep_createSelectSql($hash,$table,"COUNT(*)",$device,$reading,undef,undef,''); + $sql = DbRep_createSelectSql($hash,$table,$selspec,$device,$reading,undef,undef,$addon); } Log3 ($name, 4, "DbRep $name - SQL execute: $sql"); @@ -2709,19 +2629,21 @@ sub count_DoParse($) { $dbh->disconnect; return "$name|''|$device|$reading|''|$err|$table"; } - - # DB-Abfrage -> Ergebnis in @arr aufnehmen - my @line = $sth->fetchrow_array(); - Log3 ($name, 5, "DbRep $name - SQL result: $line[0]") if($line[0]); - - if($aggregation eq "hour") { - my @rsf = split(/[" "\|":"]/,$runtime_string_first); - $arrstr .= $runtime_string."#".$line[0]."#".$rsf[0]."_".$rsf[1]."|"; + if($ced) { + # detaillierter Readings-Count + while (my @line = $sth->fetchrow_array()) { + Log3 ($name, 5, "DbRep $name - SQL result: @line"); + $tc += $line[1] if($line[1]); # total count für Reading + $arrstr .= $runtime_string."#".$line[0]."#".$line[1]."#".$ttail; + } + # total count (über alle selected Readings) für Zeitabschnitt einfügen + $arrstr .= $runtime_string."#"."ALLREADINGS"."#".$tc."#".$ttail; } else { - my @rsf = split(" ",$runtime_string_first); - $arrstr .= $runtime_string."#".$line[0]."#".$rsf[0]."|"; - } + my @line = $sth->fetchrow_array(); + Log3 ($name, 5, "DbRep $name - SQL result: $line[0]") if($line[0]); + $arrstr .= $runtime_string."#"."ALLREADINGS"."#".$line[0]."#".$ttail; + } } $sth->finish; @@ -2738,7 +2660,7 @@ sub count_DoParse($) { $rt = $rt.",".$brt; - return "$name|$arrstr|$device|$reading|$rt|0|$table"; + return "$name|$arrstr|$device|$rt|0|$table"; } #################################################################################################### @@ -2752,12 +2674,10 @@ sub count_ParseDone($) { my $arrstr = decode_base64($a[1]); my $device = $a[2]; $device =~ s/[^A-Za-z\/\d_\.-]/\//g; - my $reading = $a[3]; - $reading =~ s/[^A-Za-z\/\d_\.-]/\//g; - my $bt = $a[4]; + my $bt = $a[3]; my ($rt,$brt) = split(",", $bt); - my $err = $a[5]?decode_base64($a[5]):undef; - my $table = $a[6]; 
+ my $err = $a[4]?decode_base64($a[4]):undef; + my $table = $a[5]; my $reading_runtime_string; if ($err) { @@ -2777,10 +2697,12 @@ sub count_ParseDone($) { my @arr = split("\\|", $arrstr); foreach my $row (@arr) { - my @a = split("#", $row); - my $runtime_string = $a[0]; - my $c = $a[1]; - my $rsf = $a[2]."__"; + my @a = split("#", $row); + my $runtime_string = $a[0]; + my $reading = $a[1]; + $reading =~ s/[^A-Za-z\/\d_\.-]/\//g; + my $c = $a[2]; + my $rsf = $a[3]."__"; if (AttrVal($hash->{NAME}, "readingNameMap", "")) { $reading_runtime_string = $rsf.AttrVal($hash->{NAME}, "readingNameMap", "")."__".$runtime_string; @@ -7573,6 +7495,406 @@ sub DbRep_syncStandbyDone($) { return; } +#################################################################################################### +# reduceLog - Historische Werte ausduennen non-blocking > Forum #41089 +# +# $ots - reduce Logs älter als: Attribut "timeOlderThan" oder "timestamp_begin" +# $nts - reduce Logs neuer als: Attribut "timeDiffToNow" oder "timestamp_end" +#################################################################################################### +sub DbRep_reduceLog($) { + my ($string) = @_; + my ($name,$nts,$ots) = split("\\|", $string); + my $hash = $defs{$name}; + my $dbloghash = $hash->{dbloghash}; + my $dbconn = $dbloghash->{dbconn}; + my $dbuser = $dbloghash->{dbuser}; + my $dblogname = $dbloghash->{NAME}; + my $dbmodel = $dbloghash->{MODEL}; + my $dbpassword = $attr{"sec$dblogname"}{secret}; + my @a = @{$hash->{HELPER}{REDUCELOG}}; + my $utf8 = defined($hash->{UTF8})?$hash->{UTF8}:0; + delete $hash->{HELPER}{REDUCELOG}; + my ($ret,$row,$filter,$exclude,$c,$day,$hour,$lastHour,$updDate,$updHour,$average,$processingDay,$lastUpdH,%hourlyKnown,%averageHash,@excludeRegex,@dayRows,@averageUpd,@averageUpdD); + my ($startTime,$currentHour,$currentDay,$deletedCount,$updateCount,$sum,$rowCount,$excludeCount) = (time(),99,0,0,0,0,0,0); + my ($dbh,$err,$brt); + + Log3 ($name, 5, "DbRep $name -> Start 
DbLog_reduceLog"); + + eval {$dbh = DBI->connect("dbi:$dbconn", $dbuser, $dbpassword, { PrintError => 0, RaiseError => 1, AutoInactiveDestroy => 1, mysql_enable_utf8 => $utf8 });}; + + if ($@) { + $err = encode_base64($@,""); + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + return "$name|''|$err|''"; + } + + if ($a[-1] =~ /^EXCLUDE=(.+:.+)+/i) { + ($filter) = $a[-1] =~ /^EXCLUDE=(.+)/i; + @excludeRegex = split(',',$filter); + } elsif ($a[-1] =~ /^INCLUDE=.+:.+$/i) { + $filter = 1; + } + if (defined($a[2])) { + $average = ($a[2] =~ /average=day/i) ? "AVERAGE=DAY" : ($a[2] =~ /average/i) ? "AVERAGE=HOUR" : 0; + } + + Log3 ($name, 3, "DbRep $name - reduce data older than: $ots, newer than: $nts"); + Log3 ($name, 3, "DbRep $name - reduceLog requested with options: " + .(($average) ? "$average" : '') + .(($average && $filter) ? ", " : '').(($filter) ? uc((split('=',$a[-1]))[0]).'='.(split('=',$a[-1]))[1] : '')); + + if ($ots) { + my ($sth_del, $sth_upd, $sth_delD, $sth_updD, $sth_get); + eval { $sth_del = $dbh->prepare_cached("DELETE FROM history WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) AND (VALUE=?)"); + $sth_upd = $dbh->prepare_cached("UPDATE history SET TIMESTAMP=?, EVENT=?, VALUE=? WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?) AND (VALUE=?)"); + $sth_delD = $dbh->prepare_cached("DELETE FROM history WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?)"); + $sth_updD = $dbh->prepare_cached("UPDATE history SET TIMESTAMP=?, EVENT=?, VALUE=? WHERE (DEVICE=?) AND (READING=?) AND (TIMESTAMP=?)"); + $sth_get = $dbh->prepare("SELECT TIMESTAMP,DEVICE,'',READING,VALUE FROM history WHERE " + .($a[-1] =~ /^INCLUDE=(.+):(.+)$/i ? "DEVICE like '$1' AND READING like '$2' AND " : '') + ."TIMESTAMP < '$ots'".($nts?" 
AND TIMESTAMP >= '$nts' ":" ")."ORDER BY TIMESTAMP ASC"); # '' was EVENT, no longer in use + }; + if ($@) { + $err = encode_base64($@,""); + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + return "$name|''|$err|''"; + } + + eval { $sth_get->execute(); }; + if ($@) { + $err = encode_base64($@,""); + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + return "$name|''|$err|''"; + } + + do { + $row = $sth_get->fetchrow_arrayref || ['0000-00-00 00:00:00','D','','R','V']; # || execute last-day dummy + $ret = 1; + ($day,$hour) = $row->[0] =~ /-(\d{2})\s(\d{2}):/; + $rowCount++ if($day != 00); + if ($day != $currentDay) { + if ($currentDay) { # false on first executed day + if (scalar @dayRows) { + ($lastHour) = $dayRows[-1]->[0] =~ /(.*\d+\s\d{2}):/; + $c = 0; + for my $delRow (@dayRows) { + $c++ if($day != 00 || $delRow->[0] !~ /$lastHour/); + } + if($c) { + $deletedCount += $c; + Log3 ($name, 3, "DbRep $name - reduceLog deleting $c records of day: $processingDay"); + $dbh->{RaiseError} = 1; + $dbh->{PrintError} = 0; + eval {$dbh->begin_work() if($dbh->{AutoCommit});}; + if ($@) { + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + } + eval { + my $i = 0; + my $k = 1; + my $th = ($#dayRows <= 2000)?100:($#dayRows <= 30000)?1000:10000; + for my $delRow (@dayRows) { + if($day != 00 || $delRow->[0] !~ /$lastHour/) { + Log3 ($name, 4, "DbRep $name - DELETE FROM history WHERE (DEVICE=$delRow->[1]) AND (READING=$delRow->[3]) AND (TIMESTAMP=$delRow->[0]) AND (VALUE=$delRow->[4])"); + $sth_del->execute(($delRow->[1], $delRow->[3], $delRow->[0], $delRow->[4])); + $i++; + if($i == $th) { + my $prog = $k * $i; + Log3 ($name, 3, "DbRep $name - reduceLog deletion progress of day: $processingDay is: $prog"); + $i = 0; + $k++; + } + } + } + }; + if ($@) { + $err = $@; + Log3 ($name, 2, "DbRep $name - reduceLog ! FAILED ! 
for day $processingDay: $err"); + eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; + if ($@) { + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + } + $ret = 0; + } else { + eval {$dbh->commit() if(!$dbh->{AutoCommit});}; + if ($@) { + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + } + } + $dbh->{RaiseError} = 0; + $dbh->{PrintError} = 1; + } + @dayRows = (); + } + + if ($ret && defined($a[3]) && $a[3] =~ /average/i) { + $dbh->{RaiseError} = 1; + $dbh->{PrintError} = 0; + eval {$dbh->begin_work() if($dbh->{AutoCommit});}; + if ($@) { + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + } + eval { + push(@averageUpd, {%hourlyKnown}) if($day != 00); + + $c = 0; + for my $hourHash (@averageUpd) { # Only count for logging... + for my $hourKey (keys %$hourHash) { + $c++ if ($hourHash->{$hourKey}->[0] && scalar(@{$hourHash->{$hourKey}->[4]}) > 1); + } + } + $updateCount += $c; + Log3 ($name, 3, "DbRep $name - reduceLog (hourly-average) updating $c records of day: $processingDay") if($c); # else only push to @averageUpdD + + my $i = 0; + my $k = 1; + my $th = ($c <= 2000)?100:($c <= 30000)?1000:10000; + for my $hourHash (@averageUpd) { + for my $hourKey (keys %$hourHash) { + if ($hourHash->{$hourKey}->[0]) { # true if reading is a number + ($updDate,$updHour) = $hourHash->{$hourKey}->[0] =~ /(.*\d+)\s(\d{2}):/; + if (scalar(@{$hourHash->{$hourKey}->[4]}) > 1) { # true if reading has multiple records this hour + for (@{$hourHash->{$hourKey}->[4]}) { $sum += $_; } + $average = sprintf('%.3f', $sum/scalar(@{$hourHash->{$hourKey}->[4]}) ); + $sum = 0; + Log3 ($name, 4, "DbRep $name - UPDATE history SET TIMESTAMP=$updDate $updHour:30:00, EVENT='rl_av_h', VALUE=$average WHERE DEVICE=$hourHash->{$hourKey}->[1] AND READING=$hourHash->{$hourKey}->[3] AND TIMESTAMP=$hourHash->{$hourKey}->[0] AND VALUE=$hourHash->{$hourKey}->[4]->[0]"); + $sth_upd->execute(("$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], 
$hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[4]->[0])); + + $i++; + if($i == $th) { + my $prog = $k * $i; + Log3 ($name, 3, "DbRep $name - reduceLog (hourly-average) updating progress of day: $processingDay is: $prog"); + $i = 0; + $k++; + } + push(@averageUpdD, ["$updDate $updHour:30:00", 'rl_av_h', $average, $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); + } else { + push(@averageUpdD, [$hourHash->{$hourKey}->[0], $hourHash->{$hourKey}->[2], $hourHash->{$hourKey}->[4]->[0], $hourHash->{$hourKey}->[1], $hourHash->{$hourKey}->[3], $updDate]) if (defined($a[3]) && $a[3] =~ /average=day/i); + } + } + } + } + }; + if ($@) { + $err = $@; + Log3 ($name, 2, "DbRep $name - reduceLog average=hour ! FAILED ! for day $processingDay: $err"); + eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; + if ($@) { + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + } + @averageUpdD = (); + } else { + eval {$dbh->commit() if(!$dbh->{AutoCommit});}; + if ($@) { + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + } + } + $dbh->{RaiseError} = 0; + $dbh->{PrintError} = 1; + @averageUpd = (); + } + + if (defined($a[3]) && $a[3] =~ /average=day/i && scalar(@averageUpdD) && $day != 00) { + $dbh->{RaiseError} = 1; + $dbh->{PrintError} = 0; + eval {$dbh->begin_work() if($dbh->{AutoCommit});}; + if ($@) { + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + } + eval { + for (@averageUpdD) { + push(@{$averageHash{$_->[3].$_->[4]}->{tedr}}, [$_->[0], $_->[1], $_->[3], $_->[4]]); + $averageHash{$_->[3].$_->[4]}->{sum} += $_->[2]; + $averageHash{$_->[3].$_->[4]}->{date} = $_->[5]; + } + + $c = 0; + for (keys %averageHash) { + if(scalar @{$averageHash{$_}->{tedr}} == 1) { + delete $averageHash{$_}; + } else { + $c += (scalar(@{$averageHash{$_}->{tedr}}) - 1); + } + } + $deletedCount += $c; + $updateCount += keys(%averageHash); + + my ($id,$iu) = 0; + my ($kd,$ku) = 1; + my $thd = ($c <= 2000)?100:($c <= 
30000)?1000:10000; + my $thu = ((keys %averageHash) <= 2000)?100:((keys %averageHash) <= 30000)?1000:10000; + Log3 ($name, 3, "DbRep $name - reduceLog (daily-average) updating ".(keys %averageHash).", deleting $c records of day: $processingDay") if(keys %averageHash); + for my $reading (keys %averageHash) { + $average = sprintf('%.3f', $averageHash{$reading}->{sum}/scalar(@{$averageHash{$reading}->{tedr}})); + $lastUpdH = pop @{$averageHash{$reading}->{tedr}}; + for (@{$averageHash{$reading}->{tedr}}) { + Log3 ($name, 5, "DbRep $name - DELETE FROM history WHERE DEVICE='$_->[2]' AND READING='$_->[3]' AND TIMESTAMP='$_->[0]'"); + $sth_delD->execute(($_->[2], $_->[3], $_->[0])); + + $id++; + if($id == $thd) { + my $prog = $kd * $id; + Log3 ($name, 3, "DbRep $name - reduceLog (daily-average) deleting progress of day: $processingDay is: $prog"); + $id = 0; + $kd++; + } + } + Log3 ($name, 4, "DbRep $name - UPDATE history SET TIMESTAMP=$averageHash{$reading}->{date} 12:00:00, EVENT='rl_av_d', VALUE=$average WHERE (DEVICE=$lastUpdH->[2]) AND (READING=$lastUpdH->[3]) AND (TIMESTAMP=$lastUpdH->[0])"); + $sth_updD->execute(($averageHash{$reading}->{date}." 12:00:00", 'rl_av_d', $average, $lastUpdH->[2], $lastUpdH->[3], $lastUpdH->[0])); + + $iu++; + if($iu == $thu) { + my $prog = $ku * $id; + Log3 ($name, 3, "DbRep $name - reduceLog (daily-average) updating progress of day: $processingDay is: $prog"); + $iu = 0; + $ku++; + } + } + }; + if ($@) { + Log3 ($name, 3, "DbRep $name - reduceLog average=day ! FAILED ! 
for day $processingDay"); + eval {$dbh->rollback() if(!$dbh->{AutoCommit});}; + if ($@) { + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + } + } else { + eval {$dbh->commit() if(!$dbh->{AutoCommit});}; + if ($@) { + Log3 ($name, 2, "DbRep $name - DbRep_reduceLog - $@"); + } + } + $dbh->{RaiseError} = 0; + $dbh->{PrintError} = 1; + } + %averageHash = (); + %hourlyKnown = (); + @averageUpd = (); + @averageUpdD = (); + $currentHour = 99; + } + $currentDay = $day; + } + + if ($hour != $currentHour) { # forget records from last hour, but remember these for average + if (defined($a[3]) && $a[3] =~ /average/i && keys(%hourlyKnown)) { + push(@averageUpd, {%hourlyKnown}); + } + %hourlyKnown = (); + $currentHour = $hour; + } + if (defined $hourlyKnown{$row->[1].$row->[3]}) { # remember first readings for device per h, other can be deleted + push(@dayRows, [@$row]); + if (defined($a[3]) && $a[3] =~ /average/i && defined($row->[4]) && $row->[4] =~ /^-?(?:\d+(?:\.\d*)?|\.\d+)$/ && $hourlyKnown{$row->[1].$row->[3]}->[0]) { + if ($hourlyKnown{$row->[1].$row->[3]}->[0]) { + push(@{$hourlyKnown{$row->[1].$row->[3]}->[4]}, $row->[4]); + } + } + } else { + $exclude = 0; + for (@excludeRegex) { + $exclude = 1 if("$row->[1]:$row->[3]" =~ /^$_$/); + } + if ($exclude) { + $excludeCount++ if($day != 00); + } else { + $hourlyKnown{$row->[1].$row->[3]} = (defined($row->[4]) && $row->[4] =~ /^-?(?:\d+(?:\.\d*)?|\.\d+)$/) ? [$row->[0],$row->[1],$row->[2],$row->[3],[$row->[4]]] : [0]; + } + } + $processingDay = (split(' ',$row->[0]))[0]; + + } while( $day != 00 ); + + $brt = sprintf('%.2f',time() - $startTime); + my $result = "Rows processed: $rowCount, deleted: $deletedCount" + .((defined($a[3]) && $a[3] =~ /average/i)? ", updated: $updateCount" : '') + .(($excludeCount)? ", excluded: $excludeCount" : ''); + Log3 ($name, 3, "DbRep $name - reduceLog finished. $result"); + $ret = $result; + $ret = "reduceLog finished. 
$result"; + } else { + $err = "reduceLog needs at least one of attributes \"timeOlderThan\", \"timeDiffToNow\", \"timestamp_begin\" or \"timestamp_end\" to be set"; + Log3 ($name, 2, "DbRep $name - ERROR - $err"); + $err = encode_base64($err,""); + return "$name|''|$err|''"; + } + + $dbh->disconnect(); + $ret = encode_base64($ret,""); + Log3 ($name, 5, "DbRep $name -> DbRep_reduceLogNbl finished"); + +return "$name|$ret|0|$brt"; +} + +#################################################################################################### +# reduceLog non-blocking Rückkehrfunktion +#################################################################################################### +sub DbRep_reduceLogDone($) { + my ($string) = @_; + my @a = split("\\|",$string); + my $name = $a[0]; + my $hash = $defs{$name}; + my $ret = decode_base64($a[1]); + my $err = decode_base64($a[2]) if ($a[2]); + my $brt = $a[3]; + my $dbloghash = $hash->{dbloghash}; + my $erread; + + delete $hash->{HELPER}{RUNNING_REDUCELOG}; + + if ($err) { + ReadingsSingleUpdateValue ($hash, "errortext", $err, 1); + ReadingsSingleUpdateValue ($hash, "state", "error", 1); + return; + } + + # only for this block because of warnings if details of readings are not set + no warnings 'uninitialized'; + + readingsBeginUpdate($hash); + ReadingsBulkUpdateValue($hash, "background_processing_time", sprintf("%.4f",$brt)); + ReadingsBulkUpdateValue($hash, "reduceLogState", $ret); + readingsEndUpdate($hash, 1); + + # Befehl nach Procedure ausführen + $erread = DbRep_afterproc($hash, "reduceLog"); + + my $state = $erread?$erread:"reduceLog of $hash->{DATABASE} finished"; + readingsBeginUpdate($hash); + ReadingsBulkUpdateTimeState($hash,undef,undef,$state); + readingsEndUpdate($hash, 1); + + use warnings; + +return; +} + +#################################################################################################### +# Abbruchroutine Timeout reduceLog 
+#################################################################################################### +sub DbRep_reduceLogAborted(@) { + my ($hash,$cause) = @_; + my $name = $hash->{NAME}; + my $dbh = $hash->{DBH}; + my $erread; + + $cause = $cause?$cause:"Timeout: process terminated"; + Log3 ($name, 1, "DbRep $name - BlockingCall $hash->{HELPER}{RUNNING_REDUCELOG}{fn} pid:$hash->{HELPER}{RUNNING_REDUCELOG}{pid} $cause") if($hash->{HELPER}{RUNNING_REDUCELOG}); + + # Befehl nach Procedure ausführen + no warnings 'uninitialized'; + $erread = DbRep_afterproc($hash, "reduceLog"); + $erread = ", ".(split("but", $erread))[1] if($erread); + + my $state = $cause.$erread; + $dbh->disconnect() if(defined($dbh)); + ReadingsSingleUpdateValue ($hash, "state", $state, 1); + + Log3 ($name, 2, "DbRep $name - Database reduceLog aborted due to \"$cause\" "); + + delete($hash->{HELPER}{RUNNING_REDUCELOG}); + +return; +} + #################################################################################################### # Abbruchroutine Timeout Restore #################################################################################################### @@ -7594,9 +7916,10 @@ sub DbRep_restoreAborted(@) { $dbh->disconnect() if(defined($dbh)); ReadingsSingleUpdateValue ($hash, "state", $state, 1); - Log3 ($name, 2, "DbRep $name - Database restore aborted by \"$cause\" "); + Log3 ($name, 2, "DbRep $name - Database restore aborted due to \"$cause\" "); delete($hash->{HELPER}{RUNNING_RESTORE}); + return; } @@ -7646,7 +7969,7 @@ sub DbRep_DumpAborted(@) { $dbh->disconnect() if(defined($dbh)); ReadingsSingleUpdateValue ($hash, "state", $state, 1); - Log3 ($name, 2, "DbRep $name - Database dump aborted by \"$cause\" "); + Log3 ($name, 2, "DbRep $name - Database dump aborted due to \"$cause\" "); delete($hash->{HELPER}{RUNNING_BACKUP_CLIENT}); delete($hash->{HELPER}{RUNNING_BCKPREST_SERVER}); @@ -7674,7 +7997,7 @@ sub DbRep_OptimizeAborted(@) { $dbh->disconnect() if(defined($dbh)); 
ReadingsSingleUpdateValue ($hash, "state", $state, 1); - Log3 ($name, 2, "DbRep $name - Database optimize aborted by \"$cause\" "); + Log3 ($name, 2, "DbRep $name - Database optimize aborted due to \"$cause\" "); delete($hash->{HELPER}{RUNNING_OPTIMIZE}); return; @@ -7880,7 +8203,7 @@ sub DbRep_checktimeaggr ($) { $aggregation = "day"; # für Tagesmittelwertberechnung des deutschen Wetterdienstes immer "day" $IsAggrSet = 1; } - if($hash->{LASTCMD} =~ /delEntries|fetchrows|deviceRename|readingRename|tableCurrentFillup/) { + if($hash->{LASTCMD} =~ /delEntries|fetchrows|deviceRename|readingRename|tableCurrentFillup|reduceLog/) { $IsAggrSet = 0; $aggregation = "no"; } @@ -9480,6 +9803,7 @@ return;
  • delete consecutive datasets with different timestamp but same values (clearing up consecutive doublets)
  • Repair of a corrupted SQLite database ("database disk image is malformed")
  • transmission of datasets from source database into another (Standby) database (syncStandby)
  • +
  • reduce the amount of datasets in database (reduceLog)

  • @@ -9661,11 +9985,30 @@ return;
    -
  • countEntries [history|current] - provides the number of table-entries (default: history) between period set - by timestamp-attributes if set. - If timestamp-attributes are not set, all entries of the table will be count. +
  • countEntries [history|current] - provides the number of table entries (default: history) between time period set + by time.* -attributes if set. + If time.* attributes are not set, all entries of the table will be counted. The attributes "device" and "reading" can be used to - limit the evaluation.

  • + limit the evaluation.
    + By default the summary of all counted datasets, labeled by "ALLREADINGS", will be created. + If the attribute "countEntriesDetail" is set, the number of every reading + is reported additionally.

    + + The relevant attributes for this function are:

    + + +
    + +
  • delEntries - deletes all database entries or only the database entries specified by attributes Device and/or @@ -10123,7 +10466,7 @@ return; device : select only datasets which are contain <device> reading : select only datasets which are contain <reading> time.* : A number of attributes to limit selection by time - valueFilter : filter datasets of database field "VALUE" by a regular expression + valueFilter : Filter datasets which are to show by a regular expression. The regex is applied to the whole selected dataset.
    @@ -10646,7 +10989,16 @@ return $ret;


  • -
    + +
  • versionNotes [hints | rel | <key>] - + Shows release information and/or hints about the module. It contains only main release + information for module users.
    + If no options are specified, both release information and hints will be shown. "rel" shows + only release information and "hints" shows only hints. With the <key> specification, only + the hint with the specified number is shown. +
  • +
    + @@ -10657,7 +11009,9 @@ return $ret;
    @@ -11370,6 +11782,7 @@ sub bdump {
  • Bereinigung sequentiell aufeinander folgender Datensätze mit unterschiedlichen Zeitstempel aber gleichen Werten (sequentielle Dublettenbereinigung)
  • Reparatur einer korrupten SQLite Datenbank ("database disk image is malformed")
  • Übertragung von Datensätzen aus der Quelldatenbank in eine andere (Standby) Datenbank (syncStandby)
  • +
  • Reduktion der Anzahl von Datensätzen in der Datenbank (reduceLog)

  • @@ -11555,9 +11968,28 @@ sub bdump {
  • countEntries [history | current] - liefert die Anzahl der Tabelleneinträge (default: history) in den gegebenen Zeitgrenzen (siehe Attribute). - Sind die Timestamps nicht gesetzt werden alle Einträge gezählt. + Sind die Timestamps nicht gesetzt, werden alle Einträge der Tabelle gezählt. Beschränkungen durch die Attribute Device bzw. Reading - gehen in die Selektion mit ein.

  • + gehen in die Selektion mit ein.
    + Standardmäßig wird die Summe aller Datensätze, gekennzeichnet mit "ALLREADINGS", erstellt. + Ist das Attribut "countEntriesDetail" gesetzt, wird die Anzahl jedes einzelnen Readings + zusätzlich ausgegeben.

    + + Die für diese Funktion relevanten Attribute sind:

    + +
      + + + + + + + +
      aggregation : Zusammenfassung/Gruppierung von Zeitintervallen
      countEntriesDetail: detaillierte Ausgabe der Datensatzanzahl
      device : Selektion nur von Datensätzen die <device> enthalten
      reading : Selektion nur von Datensätzen die <reading> enthalten
      time.* : eine Reihe von Attributen zur Zeitabgrenzung
      +
    +
    + +
  • delEntries - löscht alle oder die durch die Attribute device und/oder reading definierten Datenbankeinträge. Die Eingrenzung über Timestamps erfolgt @@ -11974,7 +12406,7 @@ sub bdump { Jedes Ergebnisreading setzt sich aus dem Timestring des Datensatzes, einem Index, dem Device und dem Reading zusammen. - Die Funktion fetchrows ist in der Lage mehrfach vorkommende Datensätze (Dubletten) zu erkennen. + Die Funktion fetchrows ist in der Lage, mehrfach vorkommende Datensätze (Dubletten) zu erkennen. Solche Dubletten sind mit einem Index > 1 gekennzeichnet.
    Dubletten können mit dem Attribut "fetchMarkDuplicates" farblich hervorgehoben werden.

    @@ -12008,13 +12440,13 @@ sub bdump {
      - + - +
      fetchRoute : Leserichtung des Selekts innerhalb der Datenbank
      fetchRoute : Leserichtung der Selektion innerhalb der Datenbank
      limit : begrenzt die Anzahl zu selektierenden bzw. anzuzeigenden Datensätze
      fetchMarkDuplicates : Hervorhebung von gefundenen Dubletten
      device : Selektion nur von Datensätzen die <device> enthalten
      reading : Selektion nur von Datensätzen die <reading> enthalten
      time.* : eine Reihe von Attributen zur Zeitabgrenzung
      valueFilter : filtert Datensätze des Datenbankfeldes "VALUE" mit einem regulären Ausdruck
      valueFilter : filtert die anzuzeigenden Datensätze mit einem regulären Ausdruck. Der Regex wird auf den gesamten anzuzeigenden Datensatz angewendet.

    @@ -12167,7 +12599,72 @@ sub bdump { Obwohl die Funktion selbst non-blocking ausgelegt ist, sollte das zugeordnete DbLog-Device im asynchronen Modus betrieben werden um ein Blockieren von FHEMWEB zu vermeiden (Tabellen-Lock).


  • - + + +
  • reduceLog [average[=day]] [exclude=device1:reading1,device2:reading2,...] [include=device:reading]
    + Reduziert historische Datensätze innerhalb der durch die "time.*"-Attribute bestimmten + Zeitgrenzen auf einen Eintrag (den ersten) pro Stunde je Device & Reading.
    + Es muss mindestens eines der "time.*"-Attribute gesetzt sein (siehe Tabelle unten). + Die jeweils fehlende Zeitabgrenzung wird in diesem Fall durch das Modul errechnet. +

    + + Die für diese Funktion relevanten Attribute sind:

    +
      + + + + + + + + +
      executeBeforeProc : FHEM Kommando (oder perl-Routine) vor dem Export ausführen
      executeAfterProc : FHEM Kommando (oder perl-Routine) nach dem Export ausführen
      timeOlderThan : es werden Datenbankeinträge älter als dieses Attribut reduziert
      timestamp_end : es werden Datenbankeinträge älter als dieses Attribut reduziert
      timeDiffToNow : es werden Datenbankeinträge neuer als dieses Attribut reduziert
      timestamp_begin : es werden Datenbankeinträge neuer als dieses Attribut reduziert
      +
    +
    + + Das Reading "reduceLogState" enthält das Ausführungsergebnis des letzten reduceLog-Befehls.

    + + Durch die optionale Angabe von 'average' wird nicht nur die Datenbank bereinigt, sondern + alle numerischen Werte einer Stunde werden auf einen einzigen Mittelwert reduziert.
    + Durch die optionale Angabe von 'average=day' wird nicht nur die Datenbank bereinigt, sondern + alle numerischen Werte eines Tages auf einen einzigen Mittelwert reduziert. + (impliziert 'average')

    + + Optional kann als letzter Parameter "exclude=device1:reading1,device2:reading2,...." + angegeben werden, um device/reading Kombinationen von reduceLog auszuschließen.
    + Tipp: Wird "exclude=.*:.*" angegeben, wird nichts in der Datenbank gelöscht. Das kann + z.B. verwendet werden um vorab die gesetzten Zeitgrenzen und die Anzahl der zu bearbeitenden + Datenbankeinträge zu checken.

    + + Optional kann als letzter Parameter "include=device:reading" angegeben werden, um + die auf die Datenbank ausgeführte SELECT-Abfrage einzugrenzen, was die RAM-Belastung + verringert und die Performance erhöht.

    + +
      + Beispiel:

      + + attr <name> timeOlderThan = d:200
      + set <name> reduceLog
      + # Datensätze, die älter als 200 Tage sind, werden auf den ersten Eintrag pro Stunde je Device & Reading + reduziert.
      +
      + + attr <name> timeDiffToNow = d:10
      + attr <name> timeOlderThan = d:5
      + set <name> reduceLog average include=Luftdaten_remote:%
      + # Datensätze, die älter als 5 und neuer als 10 Tage sind, werden bereinigt. Numerische Werte + einer Stunde werden auf einen Mittelwert reduziert.
      +
      +
    + + Hinweis:
    + Obwohl die Funktion selbst non-blocking ausgelegt ist, sollte das zugeordnete DbLog-Device + im asynchronen Modus betrieben werden um ein Blockieren von FHEMWEB zu vermeiden + (Tabellen-Lock).
    + Weiterhin wird dringend empfohlen den standard INDEX 'Search_Idx' in der Tabelle 'history' + anzulegen !
    + Die Abarbeitung dieses Befehls dauert unter Umständen (ohne INDEX) extrem lange.

    +

  • repairSQLite - repariert eine korrupte SQLite-Datenbank.
    Eine Korruption liegt im Allgemeinen vor, wenn die Fehlermitteilung "database disk image is malformed" @@ -12228,7 +12725,7 @@ sub bdump { verbundenen Datenbank beginnt, aufgelistet.


  • -
  • sqlCmd - führt ein beliebiges Benutzer spezifisches Kommando aus.
    +
  • sqlCmd - führt ein beliebiges benutzerspezifisches Kommando aus.
    Enthält dieses Kommando eine Delete-Operation, muss zur Sicherheit das Attribut "allowDeletion" gesetzt sein.
    Bei der Ausführung dieses Kommandos werden keine Einschränkungen durch gesetzte Attribute @@ -12458,7 +12955,7 @@ sub bdump {
  • dbValue <SQL-Statement> - Führt das angegebene SQL-Statement blockierend aus. Diese Funktion ist durch ihre Arbeitsweise - speziell für den Einsatz in usereigenen Scripten geeignet.
    + speziell für den Einsatz in benutzerspezifischen Scripten geeignet.
    Die Eingabe akzeptiert Mehrzeiler und gibt ebenso mehrzeilige Ergebnisse zurück. Werden mehrere Felder selektiert und zurückgegeben, erfolgt die Feldtrennung mit dem Trenner des Attributes "sqlResultFieldSep" (default "|"). Mehrere Ergebniszeilen @@ -12558,7 +13055,15 @@ return $ret; # Es werden nur Information der Tabellen "current" und "history" angezeigt


  • - + + +
  • versionNotes [hints | rel | <key>] - + Zeigt Release Informationen und/oder Hinweise zum Modul an. Es sind nur Release Informationen mit + Bedeutung für den Modulnutzer enthalten.
    + Sind keine Optionen angegeben, werden sowohl Release Informationen als auch Hinweise angezeigt. + "rel" zeigt nur Release Informationen und "hints" nur Hinweise an. Mit der <key>-Angabe + wird der Hinweis mit der angegebenen Nummer angezeigt. +

  • @@ -12571,7 +13076,9 @@ return $ret;
      - Über die modulspezifischen Attribute wird die Abgrenzung der Auswertung und die Aggregation der Werte gesteuert.

      + Über die modulspezifischen Attribute wird die Abgrenzung der Auswertung und die Aggregation der Werte gesteuert.
      + Die hier aufgeführten Attribute sind nicht für jede Funktion des Moduls bedeutsam. In der Hilfe zu den set/get-Kommandos + wird explizit angegeben, welche Attribute für das jeweilige Kommando relevant sind.

      Hinweis zur SQL-Wildcard Verwendung:
      Innerhalb der Attribut-Werte für "device" und "reading" kann SQL-Wildcards "%" angegeben werden. @@ -12604,16 +13111,22 @@ return $ret;
        - - - + + +
        avgArithmeticMean : es wird der arithmetische Mittelwert berechnet (default)
        avgDailyMeanGWS : berechnet die Tagesmitteltemperatur entsprechend den - Vorschriften des deutschen Wetterdienstes (siehe "helpful hints" mit Funktion get versionNotes).
        - Diese Variante verwendet automatisch die Aggregation "day".
        avgTimeWeightMean : berechnet den zeitgewichteten Mittelwert
        avgArithmeticMean : es wird der arithmetische Mittelwert berechnet (default)
        avgDailyMeanGWS : berechnet die Tagesmitteltemperatur entsprechend den + Vorschriften des deutschen Wetterdienstes (siehe "get <name> versionNotes 2").
        + Diese Variante verwendet automatisch die Aggregation "day".
        avgTimeWeightMean : berechnet den zeitgewichteten Mittelwert

      - + +
    • countEntriesDetail - Wenn gesetzt, erstellt die Funktion "countEntries" eine detaillierte Ausgabe der Datensatzzahl + pro Reading und Zeitintervall. + Standardmäßig wird nur die Summe aller selektierten Datensätze ausgegeben. +

    • + +
    • device - Abgrenzung der DB-Selektionen auf ein bestimmtes Device.
      Es können Geräte-Spezifikationen (devspec) angegeben werden.
      Innerhalb von Geräte-Spezifikationen wird SQL-Wildcard (%) als normales ASCII-Zeichen gewertet. @@ -12690,8 +13203,9 @@ return $ret; Attribut "archivesort" wird berücksichtigt.

    • -
    • executeAfterProc - Es kann ein FHEM-Kommando angegeben werden welches nach dem Dump ausgeführt werden soll.
      - Funktionen sind in {} einzuschließen.

      +
    • executeAfterProc - Es kann ein FHEM-Kommando oder eine Perl-Funktion angegeben werden welche nach der + Befehlsabarbeitung ausgeführt werden soll.
      + Funktionen sind in {} einzuschließen.

        Beispiel:

        @@ -12714,15 +13228,16 @@ sub adump { -
      • executeBeforeProc - Es kann ein FHEM-Kommando angegeben werden welches vor dem Dump ausgeführt werden soll.
        - Funktionen sind in {} einzuschließen.

        +
      • executeBeforeProc - Es kann ein FHEM-Kommando oder eine Perl-Funktion angegeben werden welche vor der + Befehlsabarbeitung ausgeführt werden soll.
        + Funktionen sind in {} einzuschließen.

        -
          - Beispiel:

          - attr <name> executeBeforeProc set og_gz_westfenster on;
          - attr <name> executeBeforeProc {bdump ("<name>")}

          +
            + Beispiel:

            + attr <name> executeBeforeProc set og_gz_westfenster on;
            + attr <name> executeBeforeProc {bdump ("<name>")}

            - # "bdump" ist eine in 99_myUtils definierte Funktion.
            + # "bdump" ist eine in 99_myUtils definierte Funktion.
             sub bdump {
            @@ -13007,14 +13522,38 @@ sub bdump {
             							   

            -
          • timestamp_begin - der zeitliche Beginn für die Datenselektion (*)

          • +
          • timestamp_begin - der zeitliche Beginn für die Datenselektion

          • + + Das Format von Timestamp ist "YYYY-MM-DD HH:MM:SS". Für die Attribute "timestamp_begin", "timestamp_end" + kann ebenso eine der folgenden Eingaben verwendet werden. Dabei wird das timestamp-Attribut dynamisch belegt:

            +
              + current_year_begin : entspricht "<aktuelles Jahr>-01-01 00:00:00"
              + current_year_end : entspricht "<aktuelles Jahr>-12-31 23:59:59"
              + previous_year_begin : entspricht "<vorheriges Jahr>-01-01 00:00:00"
              + previous_year_end : entspricht "<vorheriges Jahr>-12-31 23:59:59"
              + current_month_begin : entspricht "<aktueller Monat erster Tag> 00:00:00"
              + current_month_end : entspricht "<aktueller Monat letzter Tag> 23:59:59"
              + previous_month_begin : entspricht "<Vormonat erster Tag> 00:00:00"
              + previous_month_end : entspricht "<Vormonat letzter Tag> 23:59:59"
              + current_week_begin : entspricht "<erster Tag der akt. Woche> 00:00:00"
              + current_week_end : entspricht "<letzter Tag der akt. Woche> 23:59:59"
              + previous_week_begin : entspricht "<erster Tag Vorwoche> 00:00:00"
              + previous_week_end : entspricht "<letzter Tag Vorwoche> 23:59:59"
              + current_day_begin : entspricht "<aktueller Tag> 00:00:00"
              + current_day_end : entspricht "<aktueller Tag> 23:59:59"
              + previous_day_begin : entspricht "<Vortag> 00:00:00"
              + previous_day_end : entspricht "<Vortag> 23:59:59"
              + current_hour_begin : entspricht "<aktuelle Stunde>:00:00"
              + current_hour_end : entspricht "<aktuelle Stunde>:59:59"
              + previous_hour_begin : entspricht "<vorherige Stunde>:00:00"
              + previous_hour_end : entspricht "<vorherige Stunde>:59:59"
              +

          • timestamp_end - das zeitliche Ende für die Datenselektion. Wenn nicht gesetzt wird immer die aktuelle - Datum/Zeit-Kombi für das Ende der Selektion eingesetzt. (*)

          • + Datum/Zeit-Kombi für das Ende der Selektion eingesetzt.
            - - (*) Das Format von Timestamp ist wie in DbLog "YYYY-MM-DD HH:MM:SS". Für die Attribute "timestamp_begin", "timestamp_end" + Das Format von Timestamp ist "YYYY-MM-DD HH:MM:SS". Für die Attribute "timestamp_begin", "timestamp_end" kann ebenso eine der folgenden Eingaben verwendet werden. Dabei wird das timestamp-Attribut dynamisch belegt:

              current_year_begin : entspricht "<aktuelles Jahr>-01-01 00:00:00"
              @@ -13078,15 +13617,39 @@ sub bdump { attr <name> timeDiffToNow y:1.5
              # die Startzeit wird auf "aktuelle Zeit - 1,5 Jahre" gesetzt
            -

            +
            + + Sind die Attribute "timeDiffToNow" und "timeOlderThan" gleichzeitig gesetzt, wird der + Selektionszeitraum zwischen diesen Zeitpunkten dynamisch kalkuliert. +

          • timeOlderThan - das Selektionsende wird auf den Zeitpunkt "<aktuelle Zeit> - <timeOlderThan>" gesetzt. Dadurch werden alle Datensätze bis zu dem Zeitpunkt "<aktuelle Zeit> - <timeOlderThan>" berücksichtigt (z.b. wenn auf 86400 gesetzt, werden alle Datensätze die älter als ein Tag sind berücksichtigt). Die Timestampermittlung erfolgt - dynamisch zum Ausführungszeitpunkt.
            - Es gelten die gleichen Eingabeformate wie für das Attribut "timeDiffToNow".

          • + dynamisch zum Ausführungszeitpunkt.
            + +
              + Eingabeformat Beispiel:
              + attr <name> timeOlderThan 86400
              + # das Selektionsende wird auf "aktuelle Zeit - 86400 Sekunden" gesetzt
              + attr <name> timeOlderThan d:2 h:3 m:2 s:10
              + # das Selektionsende wird auf "aktuelle Zeit - 2 Tage 3 Stunden 2 Minuten 10 Sekunden" gesetzt
              + attr <name> timeOlderThan m:600
              + # das Selektionsende wird auf "aktuelle Zeit - 600 Minuten" gesetzt
              + attr <name> timeOlderThan h:2.5
              + # das Selektionsende wird auf "aktuelle Zeit - 2,5 Stunden" gesetzt
              + attr <name> timeOlderThan y:1 h:2.5
              + # das Selektionsende wird auf "aktuelle Zeit - 1 Jahr und 2,5 Stunden" gesetzt
              + attr <name> timeOlderThan y:1.5
              + # das Selektionsende wird auf "aktuelle Zeit - 1,5 Jahre" gesetzt
              +
            +
            + + Sind die Attribute "timeDiffToNow" und "timeOlderThan" gleichzeitig gesetzt, wird der + Selektionszeitraum zwischen diesen Zeitpunkten dynamisch kalkuliert. +

          • timeout - das Attribut setzt den Timeout-Wert für die Blocking-Call Routinen in Sekunden @@ -13138,8 +13701,9 @@ sub bdump {
          • valueFilter - Regulärer Ausdruck zur Filterung von Datensätzen innerhalb bestimmter Funktionen. Der - Regex auf den gesamten selektierten Datensatz (inkl. Device, Reading usw.) angewendet. - Bitte vergleichen sie die Erläuterungen zu den entsprechenden Set-Kommandos.

          • + Regex wird auf ein bestimmtes Feld oder den gesamten selektierten Datensatz (inkl. Device, + Reading usw.) angewendet. + Bitte beachten sie die Erläuterungen zu den entsprechenden Set-Kommandos.